Repository: hive
Updated Branches:
  refs/heads/branch-2.1 29f48b7c6 -> fdc058752


HIVE-13788 : hive msck listpartitions needs to make use of directSQL instead of 
datanucleus

Signed-off-by: Ashutosh Chauhan <hashut...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fdc05875
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fdc05875
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fdc05875

Branch: refs/heads/branch-2.1
Commit: fdc05875242e5539c66812385df703f52c0a0fbc
Parents: 29f48b7
Author: Hari Subramaniyan <harisan...@apache.org>
Authored: Fri Jun 10 14:43:00 2016 -0800
Committer: Ashutosh Chauhan <hashut...@apache.org>
Committed: Fri Jun 17 17:29:54 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java   | 6 +++++-
 .../apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java   | 6 ++++--
 2 files changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fdc05875/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 1122f8d..293b0a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.thrift.TException;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -207,8 +209,10 @@ public class HiveMetaStoreChecker {
 
     if (table.isPartitioned()) {
       if (partitions == null || partitions.isEmpty()) {
+        PrunedPartitionList prunedPartList =
+        PartitionPruner.prune(table, null, conf, toString(), null);
         // no partitions specified, let's get all
-        parts = hive.getPartitions(table);
+        parts.addAll(prunedPartList.getPartitions());
       } else {
         // we're interested in specific partitions,
         // don't check for any others

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc05875/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index 02c5a89..26e936e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -227,7 +227,7 @@ public class PartitionPruner extends Transform {
 
   private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, 
String key, boolean unknownPartitions,
     Map<String, PrunedPartitionList> partsCache)  throws SemanticException {
-    PrunedPartitionList ppList = partsCache.get(key);
+    PrunedPartitionList ppList = partsCache == null ? null : 
partsCache.get(key);
     if (ppList != null) {
       return ppList;
     }
@@ -238,7 +238,9 @@ public class PartitionPruner extends Transform {
       throw new SemanticException(e);
     }
     ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions);
-    partsCache.put(key, ppList);
+    if (partsCache != null) {
+      partsCache.put(key, ppList);
+    }
     return ppList;
   }
 

Reply via email to