HIVE-17466: Metastore API to list unique partition-key-value combinations 
(Thiruvel Thirumoolan, reviewed by Mithun Radhakrishnan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9a149843
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9a149843
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9a149843

Branch: refs/heads/hive-14535
Commit: 9a1498439dd8b7d29a272516b71741d814954777
Parents: 32e854e
Author: Mithun RK <mit...@apache.org>
Authored: Tue Sep 12 13:59:47 2017 -0700
Committer: Mithun RK <mit...@apache.org>
Committed: Tue Sep 19 13:46:06 2017 -0700

----------------------------------------------------------------------
 .../listener/DummyRawStoreFailEvent.java        |    7 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   11 +
 .../hive/metastore/HiveMetaStoreClient.java     |    6 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |    6 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  246 +
 .../apache/hadoop/hive/metastore/RawStore.java  |    7 +-
 .../hive/metastore/cache/CachedStore.java       |    9 +
 .../DummyRawStoreControlledCommit.java          |    8 +-
 .../DummyRawStoreForJdoConnection.java          |    8 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2575 ++++----
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  142 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |    5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 2395 +++++---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  181 +
 .../hive/metastore/api/AbortTxnsRequest.java    |   32 +-
 .../metastore/api/AddDynamicPartitions.java     |   32 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/ClientCapabilities.java  |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |   44 +-
 .../hive/metastore/api/FireEventRequest.java    |   32 +-
 .../hadoop/hive/metastore/api/Function.java     |   36 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java       |   32 +-
 .../api/GetFileMetadataByExprResult.java        |   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java    |   44 +-
 .../metastore/api/GetOpenTxnsInfoResponse.java  |   36 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |   32 +-
 .../hive/metastore/api/GetTablesRequest.java    |   32 +-
 .../hive/metastore/api/GetTablesResult.java     |   36 +-
 .../api/HeartbeatTxnRangeResponse.java          |   64 +-
 .../metastore/api/InsertEventRequestData.java   |   64 +-
 .../hadoop/hive/metastore/api/LockRequest.java  |   36 +-
 .../api/NotificationEventResponse.java          |   36 +-
 .../hive/metastore/api/OpenTxnsResponse.java    |   32 +-
 .../metastore/api/PartitionValuesRequest.java   | 1222 ++++
 .../metastore/api/PartitionValuesResponse.java  |  443 ++
 .../hive/metastore/api/PartitionValuesRow.java  |  438 ++
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ShowLocksResponse.java   |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 5667 +++++++++++-------
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1582 +++--
 .../src/gen/thrift/gen-php/metastore/Types.php  | 1040 +++-
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       | 1109 ++--
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  685 ++-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   67 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   66 +
 .../src/main/thrift/hive_metastore.thrift       |   22 +
 50 files changed, 12572 insertions(+), 6318 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 8d861e4..d94d920 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -51,6 +52,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -286,6 +288,11 @@ public class DummyRawStoreFailEvent implements RawStore, 
Configurable {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean 
ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+    return null;
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String dbName, String tblName,
                                                  String filter, short 
maxParts) throws MetaException {
     return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, 
maxParts);

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index cf33cca..f2747f9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -4140,6 +4140,17 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
+    public PartitionValuesResponse get_partition_values(PartitionValuesRequest 
request) throws MetaException {
+      String dbName = request.getDbName();
+      String tblName = request.getTblName();
+      List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+      partCols.add(request.getPartitionKeys().get(0));
+      return getMS().listPartitionValues(dbName, tblName, 
request.getPartitionKeys(),
+          request.isApplyDistinct(), request.getFilter(), 
request.isAscending(),
+          request.getPartitionOrder(), request.getMaxParts());
+    }
+
+    @Override
     public void alter_partition(final String db_name, final String tbl_name,
         final Partition new_part)
         throws InvalidOperationException, MetaException, TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 75e4180..f839ee7 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1368,6 +1368,12 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest 
request)
+      throws MetaException, TException, NoSuchObjectException {
+    return client.get_partition_values(request);
+  }
+
+  @Override
   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
       List<String> part_vals, String user_name, List<String> group_names)
       throws MetaException, UnknownTableException, NoSuchObjectException,

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 4d251d1..a08fc72 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
@@ -81,6 +80,8 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -587,6 +588,9 @@ public interface IMetaStoreClient {
       List<String> part_vals, short max_parts)
       throws MetaException, TException, NoSuchObjectException;
 
+  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest 
request)
+      throws MetaException, TException, NoSuchObjectException;
+
   /**
    * Get number of partitions matching specified filter
    * @param dbName the database name

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 3053dcb..7ab98ef 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -103,6 +103,8 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -2313,6 +2315,250 @@ public class ObjectStore implements RawStore, 
Configurable {
     return pns;
   }
 
+  private String extractPartitionKey(FieldSchema key, List<FieldSchema> pkeys) 
{
+    StringBuilder buffer = new StringBuilder(256);
+
+    assert pkeys.size() >= 1;
+
+    String partKey = "/" + key.getName() + "=";
+
+    // Table is partitioned by single key
+    if (pkeys.size() == 1 && (pkeys.get(0).getName().matches(key.getName()))) {
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(key.getName()).append("=\") + 
").append(key.getName().length() + 1)
+          .append(")");
+
+      // First partition key - anything between key= and first /
+    } else if ((pkeys.get(0).getName().matches(key.getName()))) {
+
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(key.getName()).append("=\") + 
").append(key.getName().length() + 1).append(", ")
+          .append("partitionName.indexOf(\"/\")")
+          .append(")");
+
+      // Last partition key - anything between /key= and end
+    } else if ((pkeys.get(pkeys.size() - 1).getName().matches(key.getName()))) 
{
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(partKey).append("\") + ").append(partKey.length())
+          .append(")");
+
+      // Intermediate key - anything between /key= and the following /
+    } else {
+
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(partKey).append("\") + ").append(partKey.length()).append(", 
")
+          .append("partitionName.indexOf(\"/\", 
partitionName.indexOf(\"").append(partKey)
+          .append("\") + 1))");
+    }
+    LOG.info("Query for Key:" + key.getName() + " is :" + buffer);
+    return buffer.toString();
+  }
+
+  @Override
+  public PartitionValuesResponse listPartitionValues(String dbName, String 
tableName, List<FieldSchema> cols,
+                                                     boolean applyDistinct, 
String filter, boolean ascending,
+                                                     List<FieldSchema> order, 
long maxParts) throws MetaException {
+
+    dbName = dbName.toLowerCase().trim();
+    tableName = tableName.toLowerCase().trim();
+    try {
+      if (filter == null || filter.isEmpty()) {
+        PartitionValuesResponse response =
+            getDistinctValuesForPartitionsNoTxn(dbName, tableName, cols, 
applyDistinct, ascending, maxParts);
+        LOG.info("Number of records fetched: " + 
response.getPartitionValues().size());
+        return response;
+      } else {
+        PartitionValuesResponse response =
+            extractPartitionNamesByFilter(dbName, tableName, filter, cols, 
ascending, applyDistinct, maxParts);
+        if (response != null && response.getPartitionValues() != null) {
+          LOG.info("Number of records fetched with filter: " + 
response.getPartitionValues().size());
+        }
+        return response;
+      }
+    } catch (Exception t) {
+      LOG.error("Exception in ORM", t);
+      throw new MetaException("Error retrieving partition values: " + t);
+    } finally {
+    }
+  }
+
+  private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, 
String tableName, String filter,
+                                                                
List<FieldSchema> cols, boolean ascending, boolean applyDistinct, long maxParts)
+      throws MetaException, NoSuchObjectException {
+
+    LOG.info("Database: " + dbName + " Table:" + tableName + " filter\"" + 
filter + "\" cols:" + cols);
+    List<String> partitionResults = new ArrayList<String>();
+    List<String> partitionNames = null;
+    List<Partition> partitions = null;
+    Table tbl = getTable(dbName, tableName);
+    try {
+      // Get partitions by name - ascending or descending
+      partitionNames = getPartitionNamesByFilter(dbName, tableName, filter, 
ascending, maxParts);
+    } catch (MetaException e) {
+      LOG.warn("Querying by partition names failed, trying out with partition 
objects, filter:" + filter);
+    }
+
+    if (partitionNames == null) {
+      partitions = getPartitionsByFilter(dbName, tableName, filter, (short) 
maxParts);
+    }
+
+    if (partitions != null) {
+      partitionNames = new ArrayList<String>(partitions.size());
+      for (Partition partition : partitions) {
+        // Check for NULL's just to be safe
+        if (tbl.getPartitionKeys() != null && partition.getValues() != null) {
+          partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), 
partition.getValues()));
+        }
+      }
+    }
+
+    if (partitionNames == null && partitions == null) {
+      throw new MetaException("Cannot obtain list of partitions by filter:\"" 
+ filter +
+          "\" for " + dbName + ":" + tableName);
+    }
+
+    if (!ascending) {
+      Collections.sort(partitionNames, Collections.reverseOrder());
+    }
+
+    // Return proper response
+    PartitionValuesResponse response = new PartitionValuesResponse();
+    response.setPartitionValues(new 
ArrayList<PartitionValuesRow>(partitionNames.size()));
+    LOG.info("Converting responses to Partition values for items:" + 
partitionNames.size());
+    for (String partName : partitionNames) {
+      ArrayList<String> vals = new 
ArrayList<String>(tbl.getPartitionKeys().size());
+      for (FieldSchema key : tbl.getPartitionKeys()) {
+        vals.add(null);
+      }
+      PartitionValuesRow row = new PartitionValuesRow();
+      Warehouse.makeValsFromName(partName, vals);
+      for (String value : vals) {
+        row.addToRow(value);
+      }
+      response.addToPartitionValues(row);
+    }
+    return response;
+  }
+
+  private List<String> getPartitionNamesByFilter(String dbName, String 
tableName,
+                                                 String filter, boolean 
ascending, long maxParts)
+      throws MetaException {
+
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing getPartitionNamesByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
+      }
+      Map<String, Object> params = new HashMap<String, Object>();
+      String queryFilterString = makeQueryFilterString(dbName, mtable, filter, 
params);
+      Query query = pm.newQuery(
+          "select partitionName from 
org.apache.hadoop.hive.metastore.model.MPartition "
+              + "where " + queryFilterString);
+
+      if (maxParts >= 0) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
+
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Parms is " + params);
+
+      String parameterDeclaration = makeParameterDeclarationStringObj(params);
+      query.declareParameters(parameterDeclaration);
+      if (ascending) {
+        query.setOrdering("partitionName ascending");
+      } else {
+        query.setOrdering("partitionName descending");
+      }
+      query.setResult("partitionName");
+
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
+
+      LOG.debug("Done executing query for getPartitionNamesByFilter");
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for getPartitionNamesByFilter, 
size:" + partNames.size());
+      query.closeAll();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return partNames;
+  }
+
+  private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String 
dbName, String tableName, List<FieldSchema> cols,
+                                                                      boolean 
applyDistinct, boolean ascending, long maxParts)
+      throws MetaException {
+
+    try {
+      openTransaction();
+      Query q = pm.newQuery("select partitionName from 
org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where table.database.name == t1 && table.tableName == t2 ");
+      q.declareParameters("java.lang.String t1, java.lang.String t2");
+
+      // TODO: Ordering seems to affect the distinctness, needs checking, 
disabling.
+/*
+      if (ascending) {
+        q.setOrdering("partitionName ascending");
+      } else {
+        q.setOrdering("partitionName descending");
+      }
+*/
+      if (maxParts > 0) {
+        q.setRange(0, maxParts);
+      }
+      StringBuilder partValuesSelect = new StringBuilder(256);
+      if (applyDistinct) {
+        partValuesSelect.append("DISTINCT ");
+      }
+      List<FieldSchema> partitionKeys = getTable(dbName, 
tableName).getPartitionKeys();
+      for (FieldSchema key : cols) {
+        partValuesSelect.append(extractPartitionKey(key, 
partitionKeys)).append(", ");
+      }
+      partValuesSelect.setLength(partValuesSelect.length() - 2);
+      LOG.info("Columns to be selected from Partitions: " + partValuesSelect);
+      q.setResult(partValuesSelect.toString());
+
+      PartitionValuesResponse response = new PartitionValuesResponse();
+      response.setPartitionValues(new ArrayList<PartitionValuesRow>());
+      if (cols.size() > 1) {
+        List<Object[]> results = (List<Object[]>) q.execute(dbName, tableName);
+        for (Object[] row : results) {
+          PartitionValuesRow rowResponse = new PartitionValuesRow();
+          for (Object columnValue : row) {
+            rowResponse.addToRow((String) columnValue);
+          }
+          response.addToPartitionValues(rowResponse);
+        }
+      } else {
+        List<Object> results = (List<Object>) q.execute(dbName, tableName);
+        for (Object row : results) {
+          PartitionValuesRow rowResponse = new PartitionValuesRow();
+          rowResponse.addToRow((String) row);
+          response.addToPartitionValues(rowResponse);
+        }
+      }
+      q.closeAll();
+      return response;
+    } finally {
+      commitTransaction();
+    }
+  }
+
   private List<String> getPartitionNamesNoTxn(String dbName, String tableName, 
short max) {
     List<String> pns = new ArrayList<String>();
     dbName = HiveStringUtils.normalizeIdentifier(dbName);

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 71982a0..2bc4d99 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -28,12 +28,12 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -202,6 +203,10 @@ public interface RawStore extends Configurable {
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  public abstract PartitionValuesResponse listPartitionValues(String db_name, 
String tbl_name,
+                                                              
List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+                                                              
List<FieldSchema> order, long maxParts) throws MetaException;
+
   public abstract List<String> listPartitionNamesByFilter(String db_name,
       String tbl_name, String filter, short max_parts) throws MetaException;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 3ba81ce..7939bfe 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -66,6 +67,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -935,6 +937,13 @@ public class CachedStore implements RawStore, Configurable 
{
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols,
+                                                     boolean applyDistinct, 
String filter, boolean ascending,
+                                                     List<FieldSchema> order, 
long maxParts) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String db_name,
       String tbl_name, String filter, short max_parts) throws MetaException {
     // TODO Translate filter -> expr

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 4db203d..a75dbb0 100644
--- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -26,12 +26,12 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -48,6 +48,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -268,6 +269,11 @@ public class DummyRawStoreControlledCommit implements 
RawStore, Configurable {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean 
ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+    return null;
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String dbName, String tblName,
       String filter, short maxParts) throws MetaException {
     return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, 
maxParts);

http://git-wip-us.apache.org/repos/asf/hive/blob/9a149843/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index fb16cfc..bbb4bf1 100644
--- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -27,12 +27,12 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -276,6 +277,11 @@ public class DummyRawStoreForJdoConnection implements 
RawStore {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean 
ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+    return null;
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String db_name, String 
tbl_name, String filter,
       short max_parts) throws MetaException {
 

Reply via email to the mailing list.