ngsg commented on code in PR #5771:
URL: https://github.com/apache/hive/pull/5771#discussion_r2160755638


##########
ql/src/java/org/apache/hadoop/hive/ql/metadata/client/SessionMetaStoreClientProxy.java:
##########
@@ -0,0 +1,3242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata.client;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.io.HdfsUtils;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
+import org.apache.hadoop.hive.metastore.PartitionDropOptions;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DeleteColumnStatisticsRequest;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesResult;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
+import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse;
+import org.apache.hadoop.hive.metastore.client.HiveMetaStoreClientUtils;
+import org.apache.hadoop.hive.metastore.client.BaseMetaStoreClientProxy;
+import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.TempTable;
+import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.CacheKey;
+import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.CacheI;
+import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.KeyType;
+import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.PartitionSpecsWrapper;
+import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.PartitionsWrapper;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;
+import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName;
+import static org.apache.hadoop.hive.metastore.Warehouse.makeValsFromName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.compareFieldColumns;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getPvals;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.isExternalTable;
+
+/**
+ * This class provides three features:
+ * 1. management of temporary tables
+ * 2. query-level caching
+ * 3. communication with the transaction manager for the current session.
+ */
+public class SessionMetaStoreClientProxy extends BaseMetaStoreClientProxy
+    implements IMetaStoreClient {
+  private static final Logger LOG = LoggerFactory.getLogger(SessionMetaStoreClientProxy.class);
+
+  private final Configuration conf;
+  private volatile Warehouse wh = null;
+
+  public SessionMetaStoreClientProxy(Configuration conf, IMetaStoreClient delegate) {
+    super(delegate);
+    this.conf = conf;
+  }
+
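+  /** Lazily creates the Warehouse on first use; the volatile field plus double-checked locking keeps initialization thread-safe. */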
+  private Warehouse getWh() throws MetaException {
+    if (wh == null) {
+      synchronized (this) {
+        if (wh == null) {
+          wh = new Warehouse(conf);
+        }
+      }
+    }
+
+    return wh;
+  }
+
+  /**
+   * Methods for supporting multiple features
+   */
+
+  @Override
+  public Table getTable(String dbname, String name) throws TException {
+    Table tempTable = getTempTable(dbname, name);
+    if (tempTable != null) {
+      // Original method used deepCopy(), do the same here.
+      return HiveMetaStoreClientUtils.deepCopy(tempTable);
+    }
+
+    GetTableRequest req = new GetTableRequest(dbname, name);
+    return getTableInternal(req);
+  }
+
+  @Override
+  public Table getTable(String dbname, String name, boolean getColumnStats, String engine) throws TException {
+    Table tempTable = getTempTable(dbname, name);
+    if (tempTable != null) {
+      // Original method used deepCopy(), do the same here.
+      return HiveMetaStoreClientUtils.deepCopy(tempTable);
+    }
+
+    GetTableRequest req = new GetTableRequest(dbname, name);
+    req.setGetColumnStats(getColumnStats);
+    if (getColumnStats) {
+      req.setEngine(engine);
+    }
+    return getTableInternal(req);
+  }
+
+  @Override
+  public Table getTable(String catName, String dbName, String tableName) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tempTable = getTempTable(dbName, tableName);
+      if (tempTable != null) {
+        // Original method used deepCopy(), do the same here.
+        return HiveMetaStoreClientUtils.deepCopy(tempTable);
+      }
+    }
+
+    GetTableRequest req = new GetTableRequest(dbName, tableName);
+    req.setCatName(catName);
+    return getTableInternal(req);
+  }
+
+  @Override
+  public Table getTable(String catName, String dbName, String tableName, String validWriteIdList)
+      throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tempTable = getTempTable(dbName, tableName);
+      if (tempTable != null) {
+        // Original method used deepCopy(), do the same here.
+        return HiveMetaStoreClientUtils.deepCopy(tempTable);
+      }
+    }
+
+    GetTableRequest req = new GetTableRequest(dbName, tableName);
+    req.setCatName(catName);
+    req.setValidWriteIdList(validWriteIdList);
+    return getTableInternal(req);
+  }
+
+  @Override
+  public Table getTable(String catName, String dbName, String tableName, String validWriteIdList,
+      boolean getColumnStats, String engine) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tempTable = getTempTable(dbName, tableName);
+      if (tempTable != null) {
+        // Original method used deepCopy(), do the same here.
+        return HiveMetaStoreClientUtils.deepCopy(tempTable);
+      }
+    }
+
+    GetTableRequest req = new GetTableRequest(dbName, tableName);
+    req.setCatName(catName);
+    req.setValidWriteIdList(validWriteIdList);
+    req.setGetColumnStats(getColumnStats);
+    if (getColumnStats) {
+      req.setEngine(engine);
+    }
+    return getTableInternal(req);
+  }
+
+  @Override
+  public Table getTable(GetTableRequest req) throws TException {
+    if (!req.isSetCatName() || getDefaultCatalog(conf).equals(req.getCatName())) {
+      Table tempTable = getTempTable(req.getDbName(), req.getTblName());
+      if (tempTable != null) {
+        // Original method used deepCopy(), do the same here.
+        return HiveMetaStoreClientUtils.deepCopy(tempTable);
+      }
+    }
+
+    return getTableInternal(req);
+  }
+
+  private Table getTableInternal(GetTableRequest req) throws TException {
+    // ThriftHiveMetaStoreClient sets processorCapabilities on the GetTableRequest.
+    // Since we use GetTableRequest as a cache key, we update the request in advance
+    // to make sure that the request inside the cache key and the actual request sent to HMS are identical.
+    String[] processorCapabilities = ThriftHiveMetaStoreClient.getProcessorCapabilities();
+    if (processorCapabilities != null) {
+      req.setProcessorCapabilities(Arrays.asList(processorCapabilities));
+    }
+    req.setProcessorIdentifier(ThriftHiveMetaStoreClient.getProcessorIdentifier());
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
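+      // The table id is cached under its own key so that later calls can stamp it onto the request,
+      // keeping the TABLE cache key stable across lookups of the same table.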
+      CacheKey cacheKeyTableId =
+          new CacheKey(KeyType.TABLE_ID, req.getCatName(), req.getDbName(), req.getTblName());
+      long tableId = -1;
+
+      if (queryCache.containsKey(cacheKeyTableId)) {
+        tableId = (long) queryCache.get(cacheKeyTableId);
+      }
+
+      req.setId(tableId);
+      CacheKey cacheKey = new CacheKey(KeyType.TABLE, req);
+      Table table = (Table) queryCache.get(cacheKey);
+      if (table == null) {
+        table = getDelegate().getTable(req);
+        if (tableId == -1) {
+          queryCache.put(cacheKeyTableId, table.getId());
+          req.setId(table.getId());
+          cacheKey = new CacheKey(KeyType.TABLE, req);
+        }
+        queryCache.put(cacheKey, table);
+      }
+      return table;
+    }
+    return getDelegate().getTable(req);
+  }
+
+  @Override
+  public boolean tableExists(String databaseName, String tableName) throws TException {
+    Table tempTable = getTempTable(databaseName, tableName);
+    if (tempTable != null) {
+      return true;
+    }
+
+    return getDelegate().tableExists(databaseName, tableName);
+  }
+
+  @Override
+  public boolean tableExists(String catName, String dbName, String tableName) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tempTable = getTempTable(dbName, tableName);
+      if (tempTable != null) {
+        return true;
+      }
+    }
+
+    return getDelegate().tableExists(catName, dbName, tableName);
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames, String engine) throws TException {
+    if (getTempTable(dbName, tableName) != null) {
+      return getTempTableColumnStats(dbName, tableName, colNames);
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<ColumnStatisticsObj>, List<String>> p =
+          MetaStoreClientCacheUtils.getTableColumnStatisticsCache(cache, null, dbName, tableName,
+              colNames, engine, null, null);
+      List<String> colStatsMissing = p.getRight();
+      List<ColumnStatisticsObj> colStats = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (colStatsMissing.isEmpty()) {
+        return colStats;
+      }
+      // 3) If they were not, gather the remaining
+      List<ColumnStatisticsObj> newColStats =
+          getDelegate().getTableColumnStatistics(dbName, tableName, colStatsMissing, engine);
+      // 4) Populate the cache
+      MetaStoreClientCacheUtils.loadTableColumnStatisticsCache(cache, newColStats, null, dbName,
+          tableName, engine, null, null);
+      // 5) Sort result (in case there is any assumption) and return
+      List<ColumnStatisticsObj> result =
+          MetaStoreClientCacheUtils.computeTableColumnStatisticsFinal(colNames, colStats, newColStats);
+      return result;
+    }
+
+    return getDelegate()
+        .getTableColumnStatistics(dbName, tableName, colNames, engine);
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+      String tableName, List<String> colNames, String engine) throws TException {
+    if (getDefaultCatalog(conf).equals(catName) && getTempTable(dbName, tableName) != null) {
+      return getTempTableColumnStats(dbName, tableName, colNames);
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<ColumnStatisticsObj>, List<String>> p =
+          MetaStoreClientCacheUtils.getTableColumnStatisticsCache(cache, catName, dbName, tableName,
+              colNames, engine, null, null);
+      List<String> colStatsMissing = p.getRight();
+      List<ColumnStatisticsObj> colStats = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (colStatsMissing.isEmpty()) {
+        return colStats;
+      }
+      // 3) If they were not, gather the remaining
+      List<ColumnStatisticsObj> newColStats =
+          getDelegate().getTableColumnStatistics(catName, dbName, tableName, colStatsMissing, engine);
+      // 4) Populate the cache
+      MetaStoreClientCacheUtils.loadTableColumnStatisticsCache(cache, newColStats, catName, dbName,
+          tableName, engine, null, null);
+      // 5) Sort result (in case there is any assumption) and return
+      List<ColumnStatisticsObj> result =
+          MetaStoreClientCacheUtils.computeTableColumnStatisticsFinal(colNames, colStats, newColStats);
+      return result;
+    }
+
+    return getDelegate()
+        .getTableColumnStatistics(catName, dbName, tableName, colNames, engine);
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+      List<String> colNames, String engine, String validWriteIdList) throws TException {
+    if (getTempTable(dbName, tableName) != null) {
+      return getTempTableColumnStats(dbName, tableName, colNames);
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<ColumnStatisticsObj>, List<String>> p =
+          MetaStoreClientCacheUtils.getTableColumnStatisticsCache(cache, null, dbName, tableName, colNames,
+              engine, validWriteIdList, null);
+      List<String> colStatsMissing = p.getRight();
+      List<ColumnStatisticsObj> colStats = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (colStatsMissing.isEmpty()) {
+        return colStats;
+      }
+      // 3) If they were not, gather the remaining
+      List<ColumnStatisticsObj> newColStats = getDelegate().getTableColumnStatistics(dbName, tableName,
+          colStatsMissing, engine, validWriteIdList);
+      // 4) Populate the cache
+      MetaStoreClientCacheUtils.loadTableColumnStatisticsCache(cache, newColStats, null, dbName, tableName,
+          engine, validWriteIdList, null);
+      // 5) Sort result (in case there is any assumption) and return
+      List<ColumnStatisticsObj> result =
+          MetaStoreClientCacheUtils.computeTableColumnStatisticsFinal(colNames, colStats, newColStats);
+      return result;
+    }
+
+    return getDelegate().getTableColumnStatistics(dbName, tableName, colNames, engine, validWriteIdList);
+  }
+
+  @Override
+  public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+      String tableName, List<String> colNames, String engine, String validWriteIdList) throws TException {
+    if (getDefaultCatalog(conf).equals(catName) && getTempTable(dbName, tableName) != null) {
+      return getTempTableColumnStats(dbName, tableName, colNames);
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<ColumnStatisticsObj>, List<String>> p =
+          MetaStoreClientCacheUtils.getTableColumnStatisticsCache(cache, catName, dbName, tableName,
+              colNames, engine, validWriteIdList, null);
+      List<String> colStatsMissing = p.getRight();
+      List<ColumnStatisticsObj> colStats = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (colStatsMissing.isEmpty()) {
+        return colStats;
+      }
+      // 3) If they were not, gather the remaining
+      List<ColumnStatisticsObj> newColStats =
+          getDelegate().getTableColumnStatistics(catName, dbName, tableName, colStatsMissing,
+              engine, validWriteIdList);
+      // 4) Populate the cache
+      MetaStoreClientCacheUtils.loadTableColumnStatisticsCache(cache, newColStats, catName, dbName,
+          tableName, engine, validWriteIdList, null);
+      // 5) Sort result (in case there is any assumption) and return
+      List<ColumnStatisticsObj> result =
+          MetaStoreClientCacheUtils.computeTableColumnStatisticsFinal(colNames, colStats, newColStats);
+      return result;
+    }
+
+    return getDelegate()
+        .getTableColumnStatistics(catName, dbName, tableName, colNames, engine, validWriteIdList);
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String dbName, String tableName,
+      List<String> partialPvals, short maxParts, String userName, List<String> groupNames) throws TException {
+    Table tmpTable = getTempTable(dbName, tableName);
+    if (tmpTable != null) {
+      TempTable tt = getPartitionedTempTable(tmpTable);
+      List<Partition> parts =
+          tt.listPartitionsByPartitionValsWithAuthInfo(partialPvals, userName, groupNames);
+      return getPartitionsForMaxParts(parts, maxParts);
+    }
+
+    // TODO should we add capabilities here as well as it returns Partition objects
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_AUTH_INFO,
+          null, dbName, tableName, partialPvals, maxParts, userName, groupNames);
+      List<Partition> v = (List<Partition>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate()
+            .listPartitionsWithAuthInfo(dbName, tableName, partialPvals, maxParts, userName, groupNames);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionsWithAuthInfoInternal, 
dbName={}, tblName={}, partVals={}",
+            dbName, tableName, partialPvals);
+      }
+      return v;
+    }
+    return getDelegate()
+        .listPartitionsWithAuthInfo(dbName, tableName, partialPvals, maxParts, userName, groupNames);
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName,
+      String tableName, List<String> partialPvals, int maxParts, String userName,
+      List<String> groupNames) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tableName);
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> parts =
+            tt.listPartitionsByPartitionValsWithAuthInfo(partialPvals, userName, groupNames);
+        return getPartitionsForMaxParts(parts, maxParts);
+      }
+    }
+
+    // TODO should we add capabilities here as well as it returns Partition objects
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_AUTH_INFO,
+          catName, dbName, tableName, partialPvals, maxParts, userName, groupNames);
+      List<Partition> v = (List<Partition>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionsWithAuthInfo(catName, dbName, tableName, partialPvals, maxParts,
+            userName, groupNames);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionsWithAuthInfoInternal, 
dbName={}, tblName={}, partVals={}",
+            dbName, tableName, partialPvals);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionsWithAuthInfo(catName, dbName, tableName, partialPvals, maxParts,
+        userName, groupNames);
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String dbName, String tableName, short maxParts,
+    String userName, List<String> groupNames) throws TException {
+    Table tmpTable = getTempTable(dbName, tableName);
+    if (tmpTable != null) {
+      TempTable tt = getPartitionedTempTable(tmpTable);
+      List<Partition> parts = tt.listPartitionsWithAuthInfo(userName, groupNames);
+      return getPartitionsForMaxParts(parts, maxParts);
+    }
+
+    // TODO should we add capabilities here as well as it returns Partition objects
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_AUTH_INFO_ALL,
+          null, dbName, tableName, maxParts, userName, groupNames);
+      List<Partition> v = (List<Partition>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionsWithAuthInfo(dbName, tableName, maxParts, userName, groupNames);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: 
method=listPartitionsWithAuthInfoInternalAll, dbName={}, tblName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionsWithAuthInfo(dbName, tableName, maxParts, userName, groupNames);
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+      int maxParts, String userName, List<String> groupNames)
+      throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tableName);
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> parts = tt.listPartitionsWithAuthInfo(userName, groupNames);
+        return getPartitionsForMaxParts(parts, maxParts);
+      }
+    }
+
+    // TODO should we add capabilities here as well as it returns Partition objects
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_AUTH_INFO_ALL,
+          catName, dbName, tableName, maxParts, userName, groupNames);
+      List<Partition> v = (List<Partition>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionsWithAuthInfo(catName, dbName, tableName, maxParts, userName, groupNames);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: 
method=listPartitionsWithAuthInfoInternalAll, dbName={}, tblName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionsWithAuthInfo(catName, dbName, tableName, maxParts, userName, groupNames);
+  }
+
+  @Override
+  public GetPartitionsPsWithAuthResponse listPartitionsWithAuthInfoRequest(
+      GetPartitionsPsWithAuthRequest req) throws TException {
+    if (!req.isSetCatName() || getDefaultCatalog(conf).equals(req.getCatName())) {
+      Table tmpTable = getTempTable(req.getDbName(), req.getTblName());
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> partitions = tt.listPartitionsWithAuthInfo(req.getUserName(), req.getGroupNames());
+        GetPartitionsPsWithAuthResponse response = new GetPartitionsPsWithAuthResponse();
+        response.setPartitions(getPartitionsForMaxParts(partitions, req.getMaxParts()));
+        return response;
+      }
+    }
+
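+    // If the caller did not set a validWriteIdList, resolve one for the table (see getValidWriteIdList).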
+    if (req.getValidWriteIdList() == null) {
+      req.setValidWriteIdList(getValidWriteIdList(req.getDbName(), req.getTblName()));
+    }
+
+    req.setMaxParts(HiveMetaStoreClientUtils.shrinkMaxtoShort(req.getMaxParts()));
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_AUTH_INFO_REQ, req);
+      GetPartitionsPsWithAuthResponse v = (GetPartitionsPsWithAuthResponse) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionsWithAuthInfoRequest(req);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: 
method=listPartitionsWithAuthInfoRequestInternal, dbName={}, tblName={}, 
partVals={}",
+            req.getDbName(), req.getTblName(), req.getPartVals());
+      }
+      return v;
+    }
+    return getDelegate().listPartitionsWithAuthInfoRequest(req);
+  }
+
+  @Override
+  public List<String> listPartitionNames(String dbName, String tableName, short max) throws TException {
+    Table tmpTable = getTempTable(dbName, tableName);
+    if (tmpTable != null) {
+      TempTable tt = getPartitionedTempTable(tmpTable);
+      List<Partition> partitions = tt.listPartitions();
+      List<String> result = new ArrayList<>();
+      int lastIndex = (max < 0 || max > partitions.size()) ? partitions.size() : max;
+      for (int i = 0; i < lastIndex; i++) {
+        result.add(makePartName(tmpTable.getPartitionKeys(), partitions.get(i).getValues()));
+      }
+      Collections.sort(result);
+      return result;
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_ALL, null, dbName, tableName, max);
+      List<String> v = (List<String>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionNames(dbName, tableName, max);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionNamesInternalAll, 
dbName={}, tableName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionNames(dbName, tableName, max);
+  }
+
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tableName, int maxParts)
+      throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tableName);
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> partitions = tt.listPartitions();
+        List<String> result = new ArrayList<>();
+        int lastIndex = (maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts;
+        for (int i = 0; i < lastIndex; i++) {
+          result.add(makePartName(tmpTable.getPartitionKeys(), partitions.get(i).getValues()));
+        }
+        Collections.sort(result);
+        return result;
+      }
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_ALL, catName, dbName, tableName, maxParts);
+      List<String> v = (List<String>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionNames(catName, dbName, tableName, maxParts);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionNamesInternalAll, 
dbName={}, tableName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionNames(catName, dbName, tableName, maxParts);
+  }
+
+  @Override
+  public List<String> listPartitionNames(String dbName, String tableName, List<String> partVals,
+      short maxParts) throws TException {
+    Table tmpTable = getTempTable(dbName, tableName);
+    if (tmpTable != null) {
+      TempTable tt = getPartitionedTempTable(tmpTable);
+      List<Partition> partitions = tt.getPartitionsByPartitionVals(partVals);
+      List<String> result = new ArrayList<>();
+      int lastIndex = (maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts;
+      for (int i = 0; i < lastIndex; i++) {
+        result.add(makePartName(tmpTable.getPartitionKeys(), partitions.get(i).getValues()));
+      }
+      Collections.sort(result);
+      return result;
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey =
+          new CacheKey(KeyType.LIST_PARTITIONS, null, dbName, tableName, partVals, maxParts);
+      List<String> v = (List<String>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionNames(dbName, tableName, partVals, maxParts);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionNamesInternal, 
dbName={}, tblName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionNames(dbName, tableName, partVals, maxParts);
+  }
+
+  @Override
+  public List<String> listPartitionNames(String catName, String dbName, String tableName,
+      List<String> partVals, int maxParts) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tableName);
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> partitions = tt.getPartitionsByPartitionVals(partVals);
+        List<String> result = new ArrayList<>();
+        int lastIndex = (maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts;
+        for (int i = 0; i < lastIndex; i++) {
+          result.add(makePartName(tmpTable.getPartitionKeys(), partitions.get(i).getValues()));
+        }
+        Collections.sort(result);
+        return result;
+      }
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey =
+          new CacheKey(KeyType.LIST_PARTITIONS, catName, dbName, tableName, partVals, maxParts);
+      List<String> v = (List<String>) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionNames(catName, dbName, tableName, partVals, maxParts);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionNamesInternal, 
dbName={}, tableName={}",
+            dbName, tableName);
+      }
+      return v;
+    }
+    return getDelegate().listPartitionNames(catName, dbName, tableName, partVals, maxParts);
+  }
+
+  @Override
+  public GetPartitionNamesPsResponse listPartitionNamesRequest(GetPartitionNamesPsRequest req)
+      throws TException {
+    if (!req.isSetCatName() || getDefaultCatalog(conf).equals(req.getCatName())) {
+      Table tmpTable = getTempTable(req.getDbName(), req.getTblName());
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        List<Partition> partitions = tt.getPartitionsByPartitionVals(req.getPartValues());
+        short maxParts = req.getMaxParts();
+        int lastIndex = (maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts;
+
+        List<String> result = new ArrayList<>();
+        for (int i = 0; i < lastIndex; i++) {
+          result.add(makePartName(tmpTable.getPartitionKeys(), partitions.get(i).getValues()));
+        }
+        Collections.sort(result);
+        GetPartitionNamesPsResponse response = new GetPartitionNamesPsResponse();
+        response.setNames(result);
+        return response;
+      }
+    }
+
+    if (req.getValidWriteIdList() == null) {
+      req.setValidWriteIdList(getValidWriteIdList(req.getDbName(), req.getTblName()));
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.LIST_PARTITIONS_REQ, req);
+      GetPartitionNamesPsResponse v = (GetPartitionNamesPsResponse) queryCache.get(cacheKey);
+      if (v == null) {
+        v = getDelegate().listPartitionNamesRequest(req);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=listPartitionNamesRequestInternal, 
dbName={}, tblName={}, partValues={}",
+            req.getDbName(), req.getTblName(), req.getPartValues());
+      }
+      return v;
+    }
+    return getDelegate().listPartitionNamesRequest(req);
+  }
+
+  @Override
+  public boolean listPartitionsByExpr(String catName, String dbName, String tableName, byte[] expr,
+      String defaultPartitionName, int maxParts, List<Partition> result) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tableName);
+      if (tmpTable != null) {
+        result.addAll(getPartitionsForMaxParts(getPartitionedTempTable(tmpTable).listPartitionsByFilter(
+            generateJDOFilter(tmpTable, expr, defaultPartitionName)), maxParts));
+        return result.isEmpty();
+      }
+    }
+
+    PartitionsByExprRequest req = new PartitionsByExprRequest(dbName, tableName, ByteBuffer.wrap(expr));
+    req.setCatName(catName);
+    if (defaultPartitionName != null) {
+      req.setDefaultPartitionName(defaultPartitionName);
+    }
+    if (maxParts >= 0) {
+      req.setMaxParts(HiveMetaStoreClientUtils.shrinkMaxtoShort(maxParts));
+    }
+    req.setValidWriteIdList(getValidWriteIdList(dbName, tableName));
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_BY_EXPR, req);
+      PartitionsWrapper v = (PartitionsWrapper) queryCache.get(cacheKey);
+      if (v == null) {
+        List<Partition> parts = new ArrayList<>();
+        // It would be better if we called listPartitionsByExpr(catName, dbName, ...) here.
+        // However, since we set validWriteIdList, which cannot be propagated via the original method,
+        // we use listPartitionsByExpr(req) to properly pass it to the thrift layer.
+        // The same logic applies on the non-cache path.
+        boolean hasUnknownPart = getDelegate().listPartitionsByExpr(req, parts);
+        v = new MetaStoreClientCacheUtils.PartitionsWrapper(parts, hasUnknownPart);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=getPartitionsByExprInternal, 
dbName={}, tblName={}",
+            req.getDbName(), req.getTblName());
+      }
+
+      result.addAll(v.partitions);
+      return v.hasUnknownPartition;
+    }
+    return getDelegate().listPartitionsByExpr(req, result);
+  }
+
+  @Override
+  public boolean listPartitionsByExpr(String dbName, String tableName, byte[] expr,
+      String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+    Table tmpTable = getTempTable(dbName, tableName);
+    if (tmpTable != null) {
+      result.addAll(getPartitionsForMaxParts(getPartitionedTempTable(tmpTable).listPartitionsByFilter(
+          generateJDOFilter(tmpTable, expr, defaultPartitionName)), maxParts));
+      return result.isEmpty();
+    }
+
+    PartitionsByExprRequest req = new PartitionsByExprRequest(dbName, tableName, ByteBuffer.wrap(expr));
+    if (defaultPartitionName != null) {
+      req.setDefaultPartitionName(defaultPartitionName);
+    }
+    if (maxParts >= 0) {
+      req.setMaxParts(HiveMetaStoreClientUtils.shrinkMaxtoShort(maxParts));
+    }
+    req.setValidWriteIdList(getValidWriteIdList(dbName, tableName));
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_BY_EXPR, req);
+      PartitionsWrapper v = (PartitionsWrapper) queryCache.get(cacheKey);
+      if (v == null) {
+        List<Partition> parts = new ArrayList<>();
+        // It would be better if we called listPartitionsByExpr(catName, dbName, ...) here.
+        // However, since we set validWriteIdList, which cannot be propagated via the original method,
+        // we use listPartitionsByExpr(req) to properly pass it to the thrift layer.
+        // The same logic applies on the non-cache path.
+        boolean hasUnknownPart = getDelegate().listPartitionsByExpr(req, parts);
+        v = new MetaStoreClientCacheUtils.PartitionsWrapper(parts, hasUnknownPart);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=getPartitionsByExprInternal, 
dbName={}, tblName={}",
+            req.getDbName(), req.getTblName());
+      }
+
+      result.addAll(v.partitions);
+      return v.hasUnknownPartition;
+    }
+    return getDelegate().listPartitionsByExpr(req, result);
+  }
+
+  @Override
+  public boolean listPartitionsByExpr(PartitionsByExprRequest req, List<Partition> result) throws TException {
+    if (!req.isSetCatName() || getDefaultCatalog(conf).equals(req.getCatName())) {
+      Table tmpTable = getTempTable(req.getDbName(), req.getTblName());
+      if (tmpTable != null) {
+        result.addAll(getPartitionsForMaxParts(getPartitionedTempTable(tmpTable).listPartitionsByFilter(
+            generateJDOFilter(tmpTable, req.getExpr(), req.getDefaultPartitionName())), req.getMaxParts()));
+        return result.isEmpty();
+      }
+    }
+
+    req.setValidWriteIdList(getValidWriteIdList(req.getDbName(), req.getTblName()));
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_BY_EXPR, req);
+      PartitionsWrapper v = (PartitionsWrapper) queryCache.get(cacheKey);
+      if (v == null) {
+        List<Partition> parts = new ArrayList<>();
+        boolean hasUnknownPart = getDelegate().listPartitionsByExpr(req, parts);
+        v = new MetaStoreClientCacheUtils.PartitionsWrapper(parts, hasUnknownPart);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=getPartitionsByExprInternal, 
dbName={}, tblName={}",
+            req.getDbName(), req.getTblName());
+      }
+
+      result.addAll(v.partitions);
+      return v.hasUnknownPartition;
+    }
+    return getDelegate().listPartitionsByExpr(req, result);
+  }
+
+  @Override
+  public boolean listPartitionsSpecByExpr(PartitionsByExprRequest req, List<PartitionSpec> result)
+      throws TException {
+    if (!req.isSetCatName() || getDefaultCatalog(conf).equals(req.getCatName())) {
+      Table tmpTable = getTempTable(req.getDbName(), req.getTblName());
+      if (tmpTable != null) {
+        result.addAll(
+            MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tmpTable,
+                getPartitionsForMaxParts(getPartitionedTempTable(tmpTable).listPartitionsByFilter(
+                    generateJDOFilter(tmpTable, req.getExpr(), req.getDefaultPartitionName())),
+                    req.getMaxParts())));
+        return result.isEmpty();
+      }
+    }
+
+    // TODO: Should we set ValidWriteIdList just like listPartitionsByExpr()?
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      // Retrieve or populate cache
+      CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_SPEC_BY_EXPR, req);
+      PartitionSpecsWrapper v = (PartitionSpecsWrapper) queryCache.get(cacheKey);
+      if (v == null) {
+        List<PartitionSpec> parts = new ArrayList<>();
+        boolean hasUnknownPart = getDelegate().listPartitionsSpecByExpr(req, parts);
+        v = new PartitionSpecsWrapper(parts, hasUnknownPart);
+        queryCache.put(cacheKey, v);
+      } else {
+        LOG.debug(
+            "Query level HMS cache: method=getPartitionsSpecByExprInternal, 
dbName={}, tblName={}",
+            req.getDbName(), req.getTblName());
+      }
+      result.addAll(v.partitionSpecs);
+      return v.hasUnknownPartition;
+    }
+    return getDelegate().listPartitionsSpecByExpr(req, result);
+  }
+
+  @Override
+  public List<Partition> getPartitionsByNames(String dbName, String tblName, List<String> partNames)
+      throws TException {
+    Table tmpTable = getTempTable(dbName, tblName);
+    if (tmpTable != null) {
+      return HiveMetaStoreClientUtils.deepCopyPartitions(
+          getPartitionedTempTable(tmpTable).getPartitionsByNames(partNames));
+    }
+
+    GetPartitionsByNamesRequest req =
+        MetaStoreUtils.convertToGetPartitionsByNamesRequest(dbName, tblName, partNames);
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<Partition>, List<String>> p =
+          MetaStoreClientCacheUtils.getPartitionsByNamesCache(cache, req, null);
+      List<String> partitionsMissing = p.getRight();
+      List<Partition> partitions = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (partitionsMissing.isEmpty()) {
+        return partitions;
+      }
+      // 3) If they were not, gather only the missing ones
+      List<Partition> newPartitions = getDelegate().getPartitionsByNames(dbName, tblName, partitionsMissing);
+      // 4) Populate the cache
+      GetPartitionsByNamesResult r = new GetPartitionsByNamesResult(newPartitions);
+      MetaStoreClientCacheUtils.loadPartitionsByNamesCache(cache, r, req, null);
+      // 5) Sort result (in case there is any assumption) and return
+      return MetaStoreClientCacheUtils.computePartitionsByNamesFinal(req, partitions, newPartitions)
+          .getPartitions();
+    }
+    return getDelegate().getPartitionsByNames(dbName, tblName, partNames);
+  }
+
+  @Override
+  public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+      List<String> partNames) throws TException {
+    if (getDefaultCatalog(conf).equals(catName)) {
+      Table tmpTable = getTempTable(dbName, tblName);
+      if (tmpTable != null) {
+        return HiveMetaStoreClientUtils.deepCopyPartitions(
+            getPartitionedTempTable(tmpTable).getPartitionsByNames(partNames));
+      }
+    }
+
+    String catDbName = MetaStoreUtils.prependCatalogToDbName(catName, dbName, conf);
+    GetPartitionsByNamesRequest req =
+        MetaStoreUtils.convertToGetPartitionsByNamesRequest(catDbName, tblName, partNames);
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<Partition>, List<String>> p =
+          MetaStoreClientCacheUtils.getPartitionsByNamesCache(cache, req, null);
+      List<String> partitionsMissing = p.getRight();
+      List<Partition> partitions = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (partitionsMissing.isEmpty()) {
+        return partitions;
+      }
+      // 3) If they were not, gather only the missing ones
+      List<Partition> newPartitions =
+          getDelegate().getPartitionsByNames(catName, dbName, tblName, partitionsMissing);
+      // 4) Populate the cache
+      GetPartitionsByNamesResult r = new GetPartitionsByNamesResult(newPartitions);
+      MetaStoreClientCacheUtils.loadPartitionsByNamesCache(cache, r, req, null);
+      // 5) Sort result (in case there is any assumption) and return
+      return MetaStoreClientCacheUtils.computePartitionsByNamesFinal(req, partitions, newPartitions)
+          .getPartitions();
+    }
+    return getDelegate().getPartitionsByNames(catName, dbName, tblName, partNames);
+  }
+
+  @Override
+  public GetPartitionsByNamesResult getPartitionsByNames(GetPartitionsByNamesRequest req)
+      throws TException {
+    String[] parsedNames = MetaStoreUtils.parseDbName(req.getDb_name(), conf);
+    if (getDefaultCatalog(conf).equals(parsedNames[0])) {
+      Table tmpTable = getTempTable(req.getDb_name(), req.getTbl_name());
+      if (tmpTable != null) {
+        TempTable tt = getPartitionedTempTable(tmpTable);
+        GetPartitionsByNamesResult result = new GetPartitionsByNamesResult();
+        result.setPartitions(
+            HiveMetaStoreClientUtils.deepCopyPartitions(tt.getPartitionsByNames(req.getNames())));
+
+        return result;
+      }
+    }
+
+    Map<Object, Object> queryCache = getQueryCache();
+    if (queryCache != null) {
+      MapWrapper cache = new MapWrapper(queryCache);
+      // 1) Retrieve from the cache those ids present, gather the rest
+      Pair<List<Partition>, List<String>> p =
+          MetaStoreClientCacheUtils.getPartitionsByNamesCache(cache, req, null);
+      List<String> partitionsMissing = p.getRight();
+      List<Partition> partitions = p.getLeft();
+      // 2) If they were all present in the cache, return
+      if (partitionsMissing.isEmpty()) {
+        return new GetPartitionsByNamesResult(partitions);
+      }
+      // 3) If they were not, gather only the missing ones
+      GetPartitionsByNamesRequest newRqst = new GetPartitionsByNamesRequest(req);
+      newRqst.setNames(partitionsMissing);
+      GetPartitionsByNamesResult r = getDelegate().getPartitionsByNames(newRqst);
+      // 4) Populate the cache
+      List<Partition> newPartitions =
+          MetaStoreClientCacheUtils.loadPartitionsByNamesCache(cache, r, req, null);
+      // 5) Sort result (in case there is any assumption) and return
+      return MetaStoreClientCacheUtils.computePartitionsByNamesFinal(req, partitions, newPartitions);
+    }
+    return getDelegate().getPartitionsByNames(req);
+  }
+
+  /**
+   * Methods for temporary table management
+   */
+
+  @Override
+  public void createTable(Table tbl) throws TException {
+    if (tbl.isTemporary()) {
+      if (!tbl.isSetCatName()) {
+        // TODO: There should be a single
+        tbl.setCatName(getDefaultCatalog(conf));
+      }
+      createTempTable(tbl);
+    } else {
+      getDelegate().createTable(tbl);
+    }
+  }
+
+  @Override
+  public void createTable(CreateTableRequest request) throws TException {
+    Table tbl = request.getTable();
+    if (tbl.isTemporary()) {
+      if (!tbl.isSetCatName()) {
+        tbl.setCatName(getDefaultCatalog(conf));
+      }
+      createTempTable(tbl);
+    } else {
+      getDelegate().createTable(request);
+    }
+  }
+
+  @Override
+  public void dropTable(Table tbl, boolean deleteData,
+      boolean ignoreUnknownTbl, boolean ifPurge) throws TException {
+    if (tbl.isTemporary()) {
+      dropTempTable(tbl, deleteData, ifPurge);
+    } else {
+      getDelegate().dropTable(tbl, deleteData, ignoreUnknownTbl, ifPurge);
+    }
+  }
+
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUnknownTab)
+      throws TException, UnsupportedOperationException {
+    Table table = getTempTable(dbname, name);
+    if (table != null) {
+      dropTempTable(table, deleteData, false);
+    } else {
+      getDelegate().dropTable(dbname, name, deleteData, ignoreUnknownTab, false);
+    }
+  }
+
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUnknownTab,
+      boolean ifPurge) throws TException {
+    Table table = getTempTable(dbname, name);
+    if (table != null) {
+      dropTempTable(table, deleteData, ifPurge);
+    } else {
+      getDelegate().dropTable(dbname, name, deleteData, ignoreUnknownTab, ifPurge);
+    }
+  }
+
+  @Override
+  public void dropTable(String dbname, String name) throws TException {
+    Table table = getTempTable(dbname, name);
+    if (table != null) {
+      dropTempTable(table, true, false);
+    } else {
+      getDelegate().dropTable(dbname, name);
+    }
+  }
+
+  @Override
+  public void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+      boolean ignoreUnknownTable, boolean ifPurge) throws TException {
+    // First try temp table
+    // TODO CAT - I think the right thing here is to always put temp tables in the current
+    // catalog.  But we don't yet have a notion of current catalog, so we'll have to hold on
+    // until we do.
+    Table table = getTempTable(dbName, tableName);
+    if (table != null) {
+      dropTempTable(table, deleteData, ifPurge);
+    } else {
+      getDelegate().dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, ifPurge);
+    }
+  }
+
+  @Override
+  public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
+    // First try temp table
+    Table table = getTempTable(dbName, tableName);
+    if (table != null) {
+      truncateTempTable(table);
+      return;
+    }
+    // Try underlying client
+    getDelegate().truncateTable(dbName, tableName, partNames);
+  }
+
+  @Override
+  public void truncateTable(TableName tableName, List<String> partNames) throws TException {
+    // First try temp table
+    Table table = getTempTable(tableName.getDb(), tableName.getTable());
+    if (table != null) {
+      truncateTempTable(table);
+      return;
+    }
+    // Try underlying client
+    getDelegate().truncateTable(tableName, partNames);
+  }
+
+  @Override
+  public void truncateTable(String dbName, String tableName,
+      List<String> partNames, String validWriteIds, long writeId)
+      throws TException {
+    Table table = getTempTable(dbName, tableName);
+    if (table != null) {
+      truncateTempTable(table);
+      return;
+    }
+    getDelegate().truncateTable(dbName, tableName, partNames, validWriteIds, writeId);
+  }
+
+  @Override
+  public void truncateTable(String dbName, String tableName,
+      List<String> partNames, String validWriteIds, long writeId, boolean deleteData)
+      throws TException {
+    Table table = getTempTable(dbName, tableName);
+    if (table != null) {
+      truncateTempTable(table);
+      return;
+    }
+    getDelegate().truncateTable(dbName, tableName, partNames, validWriteIds, writeId, deleteData);
+  }
+
+  @Override
+  public List<String> getAllTables(String dbName) throws TException {
+    List<String> tableNames = getDelegate().getAllTables(dbName);
+
+    // May need to merge with list of temp tables
+    Map<String, org.apache.hadoop.hive.ql.metadata.Table> tables = getTempTablesForDatabase(dbName, "?");
+    if (tables == null || tables.size() == 0) {
+      return tableNames;
+    }
+
+    // Get list of temp table names
+    Set<String> tempTableNames = tables.keySet();
+
+    // Merge and sort result
+    Set<String> allTableNames = new HashSet<String>(tableNames.size() + tempTableNames.size());
+    allTableNames.addAll(tableNames);
+    allTableNames.addAll(tempTableNames);
+    tableNames = new ArrayList<String>(allTableNames);
+    Collections.sort(tableNames);
+    return tableNames;
+  }
+
+  @Override
+  public List<String> getTables(String dbName, String tablePattern) throws TException {
+    List<String> tableNames = getDelegate().getTables(dbName, tablePattern);
+
+    // May need to merge with list of temp tables
+    dbName = dbName.toLowerCase();
+    tablePattern = tablePattern.toLowerCase();
+    Map<String, org.apache.hadoop.hive.ql.metadata.Table> tables = getTempTablesForDatabase(dbName, tablePattern);
+    if (tables == null || tables.size() == 0) {
+      return tableNames;
+    }
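+    // Convert the Hive wildcard pattern to a regex: a bare '*' becomes '.*', while a '*' already preceded by '.' is left as-is.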
+    tablePattern = tablePattern.replaceAll("(?<!\\.)\\*", ".*");
+    Pattern pattern = Pattern.compile(tablePattern);
+    Matcher matcher = pattern.matcher("");
+    Set<String> combinedTableNames = new HashSet<String>();
+    for (String tableName : tables.keySet()) {
+      matcher.reset(tableName);
+      if (matcher.matches()) {
+        combinedTableNames.add(tableName);
+      }
+    }
+
+    // Combine/sort temp and normal table results
+    combinedTableNames.addAll(tableNames);
+    tableNames = new ArrayList<String>(combinedTableNames);
+    Collections.sort(tableNames);
+    return tableNames;
+  }
+
+  @Override
+  public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws TException {
+    List<String> tableNames = getDelegate().getTables(dbname, tablePattern, tableType);
+
+    if (tableType == TableType.MANAGED_TABLE || tableType == TableType.EXTERNAL_TABLE) {
+      // May need to merge with list of temp tables
+      dbname = dbname.toLowerCase();
+      tablePattern = tablePattern.toLowerCase();
+      Map<String, org.apache.hadoop.hive.ql.metadata.Table> tables = getTempTablesForDatabase(dbname, tablePattern);
+      if (tables == null || tables.size() == 0) {
+        return tableNames;
+      }
+      tablePattern = tablePattern.replaceAll("(?<!\\.)\\*", ".*");
+      Pattern pattern = Pattern.compile(tablePattern);
+      Matcher matcher = pattern.matcher("");
+      Set<String> combinedTableNames = new HashSet<String>();
+      combinedTableNames.addAll(tableNames);
+      for (Map.Entry<String, org.apache.hadoop.hive.ql.metadata.Table> tableData : tables.entrySet()) {
+        matcher.reset(tableData.getKey());
+        if (matcher.matches()) {
+          if (tableData.getValue().getTableType() == tableType) {
+            // If the table type matches the one we are requesting,
+            // add the table to the list
+            combinedTableNames.add(tableData.getKey());
+          } else {
+            // If the table type does not match the one we are requesting,
+            // remove it in case it was added before, as a temp table
+            // overrides the original table
+            combinedTableNames.remove(tableData.getKey());
+          }
+        }
+      }
+      // Combine/sort temp and normal table results
+      tableNames = new ArrayList<>(combinedTableNames);
+      Collections.sort(tableNames);
+    }
+
+    return tableNames;
+  }
+
+  @Override
+  public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+      throws TException {
+    List<TableMeta> tableMetas = getDelegate().getTableMeta(dbPatterns, tablePatterns, tableTypes);
+    Map<String, Map<String, org.apache.hadoop.hive.ql.metadata.Table>> tmpTables = getTempTables("dbPatterns='" + dbPatterns +
+        "' tablePatterns='" + tablePatterns + "'");
+    if (tmpTables.isEmpty()) {
+      return tableMetas;
+    }
+
+    List<Matcher> dbPatternList = new ArrayList<>();
+    for (String element : dbPatterns.split("\\|")) {
+      dbPatternList.add(Pattern.compile(element.replaceAll("\\*", 
".*")).matcher(""));
+    }
+    List<Matcher> tblPatternList = new ArrayList<>();
+    for (String element : tablePatterns.split("\\|")) {
+      tblPatternList.add(Pattern.compile(element.replaceAll("\\*", 
".*")).matcher(""));
+    }
+    for (Map.Entry<String, Map<String, org.apache.hadoop.hive.ql.metadata.Table>> outer : tmpTables.entrySet()) {
+      if (!matchesAny(outer.getKey(), dbPatternList)) {
+        continue;
+      }
+      for (Map.Entry<String, org.apache.hadoop.hive.ql.metadata.Table> inner : outer.getValue().entrySet()) {
+        org.apache.hadoop.hive.ql.metadata.Table table = inner.getValue();
+        String tableName = table.getTableName();
+        String typeString = table.getTableType().name();
+        if (tableTypes != null && !tableTypes.contains(typeString)) {
+          continue;
+        }
+        if (!matchesAny(inner.getKey(), tblPatternList)) {
+          continue;
+        }
+        TableMeta tableMeta = new TableMeta(table.getDbName(), tableName, typeString);
+        tableMeta.setComments(table.getProperty("comment"));
+        tableMetas.add(tableMeta);
+      }
+    }
+    return tableMetas;
+  }
+
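+  /** Returns true if any matcher matches the string; an empty matcher list is treated as match-all. */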
+  private boolean matchesAny(String string, List<Matcher> matchers) {
+    for (Matcher matcher : matchers) {
+      if (matcher.reset(string).matches()) {
+        return true;
+      }
+    }
+    return matchers.isEmpty();
+  }
+
+  @Override
+  public List<Table> getTableObjectsByName(String dbName,

Review Comment:
   I revised `SessionMetaStoreClientProxy` to implement the `getTables` overload that takes a `GetProjectionsSpec`. As you noted [here](https://gist.github.com/okumin/add5b97c3b6674820cf1aad433856e9f), it seems that `getTableObjectsByName` is an alias for `getTables`, or perhaps `getTables` is simply misnamed, given that another `getTables` family already exists.
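
   For illustration only, a hypothetical sketch of what that aliasing could look like. The `getTables(String, String, List<String>, GetProjectionsSpec)` signature is an assumption for this sketch, not something taken from this PR:

   ```java
   // Hypothetical sketch, not code from this PR: getTableObjectsByName as a thin
   // alias of a projection-aware getTables; a null GetProjectionsSpec is assumed
   // to mean "return full table objects".
   @Override
   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
       throws TException {
     return getTables(getDefaultCatalog(conf), dbName, tableNames, /* projectionsSpec */ null);
   }
   ```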



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

