vineetgarg02 commented on a change in pull request #1317:
URL: https://github.com/apache/hive/pull/1317#discussion_r461200841



##########
File path: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
##########
@@ -1631,6 +1633,9 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
       }
     } catch (Exception e) {
       throw new HiveException(e);
+    } finally {
+      long diff = System.nanoTime() - t1;
+      LOG.debug(String.format(logString, "getTablesByType", diff, dbName, ""));

Review comment:
       Can we also make the logging conditional, based on `LOG.isDebugEnabled()`?
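       For example, a minimal sketch of the guarded version (reusing the `t1` and `logString` variables from this diff):

```java
    } finally {
      // only compute and format the timing when debug logging is actually on
      if (LOG.isDebugEnabled()) {
        long diff = System.nanoTime() - t1;
        LOG.debug(String.format(logString, "getTablesByType", diff, dbName, ""));
      }
    }
```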

##########
File path: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
##########
@@ -487,6 +487,10 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
    * in the underlying Hadoop configuration.
    */
   public static enum ConfVars {
+    MSC_CACHE_ENABLED("hive.metastore.client.cache.enabled", true,
+            "This property enables a Caffeine LoadingCache for Metastore client"),
+    MSC_CACHE_MAX_SIZE("hive.metastore.client.cache.maxSize", 1_000_000_000,
+            "Set the maximum size (number of bytes) of the cache (DEFAULT: 1GB). Only in effect when the cache is enabled"),

Review comment:
       Can you update the description to make it clear that the cache in question is the metastore client cache?
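       For example (suggested wording only):

```java
    MSC_CACHE_ENABLED("hive.metastore.client.cache.enabled", true,
            "This property enables a Caffeine LoadingCache for the metastore client"),
    MSC_CACHE_MAX_SIZE("hive.metastore.client.cache.maxSize", 1_000_000_000,
            "Set the maximum size (number of bytes) of the metastore client cache (DEFAULT: 1GB). "
            + "Only in effect when the cache is enabled"),
```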

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
##########
@@ -3665,33 +3678,39 @@ public boolean dropPartition(String dbName, String tableName, List<String> parti
    * @return list of partition objects
    */
   public List<Partition> getPartitions(Table tbl) throws HiveException {
-    if (tbl.isPartitioned()) {
-      List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
-      try {
-        GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest();
-        req.setTblName(tbl.getTableName());
-        req.setDbName(tbl.getDbName());
-        req.setUserName(getUserName());
-        req.setMaxParts((short) -1);
-        req.setGroupNames(getGroupNames());
-        if (AcidUtils.isTransactionalTable(tbl)) {
-          ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
-          req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
-        }
-        GetPartitionsPsWithAuthResponse res = getMSC().listPartitionsWithAuthInfoRequest(req);
-        tParts = res.getPartitions();
+    long t1 = System.nanoTime();
+    try {
+      if (tbl.isPartitioned()) {
+        List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
+        try {
+          GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest();
+          req.setTblName(tbl.getTableName());
+          req.setDbName(tbl.getDbName());
+          req.setUserName(getUserName());
+          req.setMaxParts((short) -1);
+          req.setGroupNames(getGroupNames());
+          if (AcidUtils.isTransactionalTable(tbl)) {
+            ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
+            req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
+          }
+          GetPartitionsPsWithAuthResponse res = getMSC().listPartitionsWithAuthInfoRequest(req);
+          tParts = res.getPartitions();
 
-      } catch (Exception e) {
-        LOG.error(StringUtils.stringifyException(e));
-        throw new HiveException(e);
-      }
-      List<Partition> parts = new ArrayList<>(tParts.size());
-      for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
-        parts.add(new Partition(tbl, tpart));
+        } catch (Exception e) {
+          LOG.error(StringUtils.stringifyException(e));
+          throw new HiveException(e);
+        }
+        List<Partition> parts = new ArrayList<>(tParts.size());
+        for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
+          parts.add(new Partition(tbl, tpart));
+        }
+        return parts;
+      } else {
+        return Collections.singletonList(new Partition(tbl));
       }
-      return parts;
-    } else {
-      return Collections.singletonList(new Partition(tbl));
+    } finally {

Review comment:
       Since we want to make the logging of the time taken by this API conditional (based on whether debug logging is turned on or not), I think it makes sense to remove it from the finally block. Basically, use the same code we had before and log the timing just before the return statement, roughly like the sketch below. (This goes for all the APIs.)
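       A minimal sketch (the `"getPartitions(Table)"` tag is my assumption; use whatever tag the patch already passes to `logString` for this method):

```java
public List<Partition> getPartitions(Table tbl) throws HiveException {
  long t1 = System.nanoTime();
  if (tbl.isPartitioned()) {
    // ... unchanged metastore lookup that fills tParts ...
    List<Partition> parts = new ArrayList<>(tParts.size());
    for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
      parts.add(new Partition(tbl, tpart));
    }
    // log just before returning, and only when debug logging is on
    if (LOG.isDebugEnabled()) {
      long diff = System.nanoTime() - t1;
      LOG.debug(String.format(logString, "getPartitions(Table)", diff,
          tbl.getDbName(), tbl.getTableName()));
    }
    return parts;
  } else {
    return Collections.singletonList(new Partition(tbl));
  }
}
```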

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
##########
@@ -3733,26 +3752,32 @@ public boolean dropPartition(String dbName, String tableName, List<String> parti
   public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec,
       short limit)
   throws HiveException {
-    if (!tbl.isPartitioned()) {
-      throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
-    }
+    long t1 = System.nanoTime();
+    try {
+      if (!tbl.isPartitioned()) {
+        throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
+      }
 
-    List<String> partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec);
+      List<String> partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec);
 
-    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
-    try {
-      partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
-          partialPvals, limit, getUserName(), getGroupNames());
-    } catch (Exception e) {
-      throw new HiveException(e);
-    }
+      List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
+      try {
+        partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
+            partialPvals, limit, getUserName(), getGroupNames());
+      } catch (Exception e) {
+        throw new HiveException(e);
+      }
 
-    List<Partition> qlPartitions = new ArrayList<Partition>();
-    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
-      qlPartitions.add( new Partition(tbl, p));
-    }
+      List<Partition> qlPartitions = new ArrayList<Partition>();
+      for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
+        qlPartitions.add( new Partition(tbl, p));
+      }
 
-    return qlPartitions;
+      return qlPartitions;
+    } finally {
+      long diff = System.nanoTime() - t1;
+      LOG.debug(String.format(logString, "getPartitions(Table, Map, short)", diff, tbl.getDbName(), tbl.getTableName()));

Review comment:
       Same as the above comment: move the timing log out of the finally block, guard it with `LOG.isDebugEnabled()`, and log just before the return statement, as in the sketch below.
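       For this method that would look like (sketch, matching the variables in this hunk):

```java
      // instead of the finally block: log just before the return statement
      if (LOG.isDebugEnabled()) {
        long diff = System.nanoTime() - t1;
        LOG.debug(String.format(logString, "getPartitions(Table, Map, short)", diff,
            tbl.getDbName(), tbl.getTableName()));
      }
      return qlPartitions;
```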

##########
File path: ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java
##########
@@ -18,22 +18,14 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Stack;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.*;

Review comment:
       Remove the wildcard import; list the `org.apache.hadoop.hive.metastore.api` classes explicitly, as below.
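       i.e. keep the explicit imports that were removed, plus whatever new `api` classes this test now actually uses:

```java
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
```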



