This is an automated email from the ASF dual-hosted git repository.

dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new f6938521a3c HIVE-29430: Split get partitions from HMSHandler (#6311)
f6938521a3c is described below

commit f6938521a3c4efc2a31093ecbd7b77866c8d97c2
Author: dengzh <[email protected]>
AuthorDate: Wed Mar 18 11:02:36 2026 +0800

    HIVE-29430: Split get partitions from HMSHandler (#6311)
---
 ...HiveMetastoreClientListPartitionsTempTable.java |    2 +-
 .../apache/hadoop/hive/metastore/HMSHandler.java   | 1053 +++++---------------
 .../apache/hadoop/hive/metastore/IHMSHandler.java  |    2 +
 .../client/builder/GetPartitionsArgs.java          |   52 +
 .../metastore/handler/AbstractRequestHandler.java  |   25 +-
 .../metastore/handler/AddPartitionsHandler.java    |    2 +-
 .../metastore/handler/AppendPartitionHandler.java  |  190 ++++
 .../metastore/handler/CreateDatabaseHandler.java   |    2 +-
 .../hive/metastore/handler/CreateTableHandler.java |    2 +-
 .../metastore/handler/DropDatabaseHandler.java     |    4 +-
 .../metastore/handler/DropPartitionsHandler.java   |    2 +-
 .../hive/metastore/handler/DropTableHandler.java   |    4 +-
 .../metastore/handler/GetPartitionsHandler.java    |  490 +++++++++
 .../metastore/handler/SetAggrStatsHandler.java     |    2 +-
 .../hive/metastore/handler/TAbstractBase.java      |   70 ++
 .../metastore/handler/TruncateTableHandler.java    |    2 +-
 .../hadoop/hive/metastore/TestHiveMetaStore.java   |    5 +-
 .../hive/metastore/TestHiveMetaStoreMethods.java   |    4 +-
 .../TestMetaStoreEndFunctionListener.java          |    3 +-
 .../hive/metastore/client/TestListPartitions.java  |    3 +-
 20 files changed, 1068 insertions(+), 851 deletions(-)

diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
index 006ba6a0584..3dd4bd94d3e 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
@@ -234,7 +234,7 @@ public void testListPartitionNamesNoDb() throws Exception {
     super.testListPartitionNamesNoDb();
   }
 
-  @Test
+  @Test(expected = NoSuchObjectException.class)
   @Override
   public void testListPartitionsAllNoTable() throws Exception {
     super.testListPartitionsAllNoTable();
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index a3a8951e687..b7801e08e77 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -23,11 +23,10 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Splitter;
-import com.google.common.base.Supplier;
-import com.google.common.base.Suppliers;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.Striped;
 import org.apache.commons.collections4.CollectionUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -35,14 +34,15 @@
 import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.api.Package;
-import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import 
org.apache.hadoop.hive.metastore.dataconnector.DataConnectorProviderFactory;
 import org.apache.hadoop.hive.metastore.events.*;
 import org.apache.hadoop.hive.metastore.handler.AbstractRequestHandler;
 import org.apache.hadoop.hive.metastore.handler.AddPartitionsHandler;
+import org.apache.hadoop.hive.metastore.handler.AppendPartitionHandler;
 import org.apache.hadoop.hive.metastore.handler.DropPartitionsHandler;
+import org.apache.hadoop.hive.metastore.handler.GetPartitionsHandler;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
@@ -91,7 +91,6 @@
 import static 
org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static 
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST;
-import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.canUpdateStats;
 import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.isDbReplicationTarget;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
@@ -122,9 +121,6 @@ public class HMSHandler extends FacebookBase implements 
IHMSHandler {
   public static final String ADMIN = "admin";
   public static final String PUBLIC = "public";
 
-  static final String NO_FILTER_STRING = "";
-  static final int UNLIMITED_MAX_PARTITIONS = -1;
-
   static final int LOG_SAMPLE_PARTITIONS_MAX_SIZE = 4;
 
   static final int LOG_SAMPLE_PARTITIONS_HALF_SIZE = 2;
@@ -286,6 +282,11 @@ public IMetaStoreMetadataTransformer 
getMetadataTransformer() {
     return transformer;
   }
 
+  @Override
+  public MetaStoreFilterHook getMetaFilterHook() {
+    return filterHook;
+  }
+
   @Override
   public void init() throws MetaException {
     init(new Warehouse(conf));
@@ -411,29 +412,6 @@ private MetaStoreFilterHook loadFilterHooks() throws 
IllegalStateException  {
     }
   }
 
-  /**
-   * Check if user can access the table associated with the partition. If not, 
then throw exception
-   * so user cannot access partitions associated with this table
-   * We are not calling Pre event listener for authorization because it 
requires getting the
-   * table object from DB, more overhead. Instead ,we call filter hook to 
filter out table if user
-   * has no access. Filter hook only requires table name, not table object. 
That saves DB access for
-   * table object, and still achieve the same purpose: checking if user can 
access the specified
-   * table
-   *
-   * @param catName catalog name of the table
-   * @param dbName database name of the table
-   * @param tblName table name
-   * @throws NoSuchObjectException
-   * @throws MetaException
-   */
-  private void authorizeTableForPartitionMetadata(
-      final String catName, final String dbName, final String tblName)
-      throws NoSuchObjectException, MetaException {
-
-    FilterUtils.checkDbAndTableFilters(
-        isServerFilterEnabled, filterHook, catName, dbName, tblName);
-  }
-
   @Override
   public void setConf(Configuration conf) {
     HMSHandlerContext.setConfiguration(conf);
@@ -2644,104 +2622,6 @@ public List<String> get_table_names_by_filter(
     return tables;
   }
 
-  private Partition append_partition_common(RawStore ms, String catName, 
String dbName,
-                                            String tableName, List<String> 
part_vals,
-                                            EnvironmentContext envContext)
-      throws InvalidObjectException, AlreadyExistsException, MetaException, 
NoSuchObjectException {
-
-    Partition part = new Partition();
-    boolean success = false, madeDir = false;
-    Path partLocation = null;
-    Table tbl = null;
-    Map<String, String> transactionalListenerResponses = 
Collections.emptyMap();
-    Database db = null;
-    try {
-      ms.openTransaction();
-      part.setCatName(catName);
-      part.setDbName(dbName);
-      part.setTableName(tableName);
-      part.setValues(part_vals);
-
-      MetaStoreServerUtils.validatePartitionNameCharacters(part_vals, 
getConf());
-
-      tbl = ms.getTable(part.getCatName(), part.getDbName(), 
part.getTableName(), null);
-      if (tbl == null) {
-        throw new InvalidObjectException(
-            "Unable to add partition because table or database do not exist");
-      }
-      if (tbl.getSd().getLocation() == null) {
-        throw new MetaException(
-            "Cannot append a partition to a view");
-      }
-
-      db = get_database_core(catName, dbName);
-
-      firePreEvent(new PreAddPartitionEvent(tbl, part, this));
-
-      part.setSd(tbl.getSd().deepCopy());
-      partLocation = new Path(tbl.getSd().getLocation(), Warehouse
-          .makePartName(tbl.getPartitionKeys(), part_vals));
-      part.getSd().setLocation(partLocation.toString());
-
-      Partition old_part;
-      try {
-        old_part = ms.getPartition(part.getCatName(), part.getDbName(), part
-            .getTableName(), part.getValues());
-      } catch (NoSuchObjectException e) {
-        // this means there is no existing partition
-        old_part = null;
-      }
-      if (old_part != null) {
-        throw new AlreadyExistsException("Partition already exists:" + part);
-      }
-
-      if (!wh.isDir(partLocation)) {
-        if (!wh.mkdirs(partLocation)) {
-          throw new MetaException(partLocation
-              + " is not a directory or unable to create one");
-        }
-        madeDir = true;
-      }
-
-      // set create time
-      long time = System.currentTimeMillis() / 1000;
-      part.setCreateTime((int) time);
-      part.putToParameters(hive_metastoreConstants.DDL_TIME, 
Long.toString(time));
-
-      if (canUpdateStats(getConf(), tbl)) {
-        MetaStoreServerUtils.updatePartitionStatsFast(part, tbl, wh, madeDir, 
false, envContext, true);
-      }
-
-      if (ms.addPartition(part)) {
-        if (!transactionalListeners.isEmpty()) {
-          transactionalListenerResponses =
-              MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
-                  EventType.ADD_PARTITION,
-                  new AddPartitionEvent(tbl, part, true, this),
-                  envContext);
-        }
-
-        success = ms.commitTransaction();
-      }
-    } finally {
-      if (!success) {
-        ms.rollbackTransaction();
-        if (madeDir) {
-          wh.deleteDir(partLocation, false, 
ReplChangeManager.shouldEnableCm(db, tbl));
-        }
-      }
-
-      if (!listeners.isEmpty()) {
-        MetaStoreListenerNotifier.notifyEvent(listeners,
-            EventType.ADD_PARTITION,
-            new AddPartitionEvent(tbl, part, success, this),
-            envContext,
-            transactionalListenerResponses, ms);
-      }
-    }
-    return part;
-  }
-
   public void firePreEvent(PreEventContext event) throws MetaException {
     for (MetaStorePreEventListener listener : preListeners) {
       try {
@@ -2774,62 +2654,35 @@ public Partition 
append_partition_with_environment_context(final String dbName,
       final String tableName, final List<String> part_vals, final 
EnvironmentContext envContext)
       throws InvalidObjectException, AlreadyExistsException, MetaException {
     String[] parsedDbName = parseDbName(dbName, conf);
-    startPartitionFunction("append_partition_with_environment_context", 
parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals);
-    Partition ret = null;
-    Exception ex = null;
-    try {
-      AppendPartitionsRequest appendPartitionsReq = new 
AppendPartitionsRequest();
-      appendPartitionsReq.setDbName(parsedDbName[DB_NAME]);
-      appendPartitionsReq.setTableName(tableName);
-      appendPartitionsReq.setPartVals(part_vals);
-      appendPartitionsReq.setCatalogName(parsedDbName[CAT_NAME]);
-      appendPartitionsReq.setEnvironmentContext(envContext);
-      ret = append_partition_req(appendPartitionsReq);
-    } catch (Exception e) {
-      ex = e;
-      throw handleException(e).throwIfInstance(MetaException.class, 
InvalidObjectException.class, AlreadyExistsException.class)
-          .defaultMetaException();
-    } finally {
-      endFunction("append_partition_with_environment_context", ret != null, 
ex, tableName);
-    }
-    return ret;
+    AppendPartitionsRequest appendPartitionsReq = new 
AppendPartitionsRequest();
+    appendPartitionsReq.setDbName(parsedDbName[DB_NAME]);
+    appendPartitionsReq.setTableName(tableName);
+    appendPartitionsReq.setPartVals(part_vals);
+    appendPartitionsReq.setCatalogName(parsedDbName[CAT_NAME]);
+    appendPartitionsReq.setEnvironmentContext(envContext);
+    return append_partition_req(appendPartitionsReq);
   }
 
   @Override
   public Partition append_partition_req(final AppendPartitionsRequest 
appendPartitionsReq)
       throws InvalidObjectException, AlreadyExistsException, MetaException {
-    List<String> part_vals = appendPartitionsReq.getPartVals();
     String dbName = appendPartitionsReq.getDbName();
     String catName = appendPartitionsReq.isSetCatalogName() ?
         appendPartitionsReq.getCatalogName() : getDefaultCatalog(conf);
     String tableName = appendPartitionsReq.getTableName();
-    String partName = appendPartitionsReq.getName();
-    if (partName == null && (part_vals == null || part_vals.isEmpty())) {
-      throw new MetaException("The partition values must not be null or 
empty.");
-    }
-    if (part_vals == null || part_vals.isEmpty()) {
-      // partition name is set, get partition vals and then append partition
-      part_vals = getPartValsFromName(getMS(), catName, dbName, tableName, 
partName);
-    }
-    startPartitionFunction("append_partition_req", catName, dbName, tableName, 
part_vals);
-    if (LOG.isDebugEnabled()) {
-      for (String part : part_vals) {
-        LOG.debug(part);
-      }
-    }
-    Partition ret = null;
+    startTableFunction("append_partition_req", catName, dbName, tableName);
     Exception ex = null;
     try {
-      ret = append_partition_common(getMS(), catName, dbName, tableName, 
part_vals, appendPartitionsReq.getEnvironmentContext());
+      AppendPartitionHandler appendPartition = 
AbstractRequestHandler.offer(this, appendPartitionsReq);
+      return appendPartition.getResult().partition();
     } catch (Exception e) {
       ex = e;
       throw handleException(e)
           .throwIfInstance(MetaException.class, InvalidObjectException.class, 
AlreadyExistsException.class)
           .defaultMetaException();
     } finally {
-      endFunction("append_partition_req", ret != null, ex, tableName);
+      endFunction("append_partition_req", ex == null, ex, tableName);
     }
-    return ret;
   }
 
   public Lock getTableLockFor(String dbName, String tblName) {
@@ -2860,14 +2713,14 @@ public AddPartitionsResult 
add_partitions_req(AddPartitionsRequest request)
           if (addPartsResult.newParts() != null && 
!addPartsResult.newParts().isEmpty()) {
             StorageDescriptor sd = 
addPartsResult.newParts().getFirst().getSd().deepCopy();
             result.setPartitionColSchema(sd.getCols());
+            addPartsResult.newParts().forEach(partition -> 
partition.getSd().getCols().clear());
           }
-          addPartsResult.newParts().stream().forEach(partition -> 
partition.getSd().getCols().clear());
         }
         result.setPartitions(addPartsResult.newParts());
       }
     } catch (Exception e) {
       ex = e;
-      throw 
handleException(e).throwIfInstance(TException.class).defaultMetaException();
+      throw handleException(e).defaultTException();
     } finally {
       endFunction("add_partitions_req", ex == null, ex, tblName);
     }
@@ -3226,7 +3079,7 @@ public DropPartitionsResult drop_partitions_req(
       return resp;
     } catch (Exception e) {
       ex = e;
-      throw 
handleException(e).throwIfInstance(TException.class).defaultMetaException();
+      throw handleException(e).defaultTException();
     } finally {
       endFunction("drop_partition_req", ex == null, ex,
           TableName.getQualified(request.getCatName(), request.getDbName(), 
request.getTblName()));
@@ -3252,21 +3105,17 @@ public boolean drop_partition_req(final 
DropPartitionRequest dropPartitionReq) t
     String catName = dropPartitionReq.getCatName();
     String tbl_name = dropPartitionReq.getTblName();
     List<String> part_vals = dropPartitionReq.getPartVals();
-    boolean ret = false;
-    Exception ex = null;
     try {
-      Table t = getMS().getTable(catName, dbName, tbl_name,  null);
+      Table t = getMS().getTable(catName, dbName, tbl_name, null);
       if (t == null) {
-        throw new InvalidObjectException(dbName + "." + tbl_name
-            + " table not found");
+        throw new InvalidObjectException(dbName + "." + tbl_name + " table not 
found");
       }
       List<String> partNames = new ArrayList<>();
       if (part_vals == null || part_vals.isEmpty()) {
         part_vals = getPartValsFromName(t, dropPartitionReq.getPartName());
       }
       partNames.add(Warehouse.makePartName(t.getPartitionKeys(), part_vals));
-      startPartitionFunction("drop_partition_req", catName, dbName, tbl_name, 
part_vals);
-      LOG.info("Partition values: {}", part_vals);
+      LOG.info("drop_partition_req partition values: {}", part_vals);
       RequestPartsSpec requestPartsSpec = RequestPartsSpec.names(partNames);
       DropPartitionsRequest request = new DropPartitionsRequest(dbName, 
tbl_name, requestPartsSpec);
       request.setCatName(catName);
@@ -3275,15 +3124,11 @@ public boolean drop_partition_req(final 
DropPartitionRequest dropPartitionReq) t
       request.setDeleteData(dropPartitionReq.isDeleteData());
       request.setEnvironmentContext(dropPartitionReq.getEnvironmentContext());
       drop_partitions_req(request);
-      return true;
     } catch (Exception e) {
-      ex = e;
       handleException(e).convertIfInstance(InvalidObjectException.class, 
NoSuchObjectException.class)
           .rethrowException(e);
-    } finally {
-      endFunction("drop_partition_req", ret, ex, tbl_name);
     }
-    return ret;
+    return true;
   }
 
   /**
@@ -3294,81 +3139,31 @@ public boolean drop_partition_req(final 
DropPartitionRequest dropPartitionReq) t
   @Deprecated
   public Partition get_partition(final String db_name, final String tbl_name,
                                  final List<String> part_vals) throws 
MetaException, NoSuchObjectException {
-    String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partition", parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-        tbl_name, part_vals);
-
-    Partition ret = null;
-    Exception ex = null;
     try {
+      String[] parsedDbName = parseDbName(db_name, conf);
       GetPartitionRequest getPartitionRequest = new 
GetPartitionRequest(parsedDbName[DB_NAME], tbl_name, part_vals);
       getPartitionRequest.setCatName(parsedDbName[CAT_NAME]);
-      ret = get_partition_req(getPartitionRequest).getPartition();
-    } catch (Exception e) {
-      ex = e;
-      throw handleException(e).throwIfInstance(MetaException.class, 
NoSuchObjectException.class).defaultMetaException();
-    } finally {
-      endFunction("get_partition", ret != null, ex, tbl_name);
-    }
-    return ret;
-  }
-
-  private Partition get_partition_core(final String db_name, final String 
tbl_name,
-                                 final List<String> part_vals) throws 
MetaException, NoSuchObjectException {
-    String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partition_core", parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-            tbl_name, part_vals);
-
-    Partition ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-      fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-      ret = getMS().getPartition(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name, part_vals);
-      ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      throw handleException(e).throwIfInstance(MetaException.class, 
NoSuchObjectException.class).defaultMetaException();
-    } finally {
-      endFunction("get_partition_core", ret != null, ex, tbl_name);
+      return get_partition_req(getPartitionRequest).getPartition();
+    } catch (TException e) {
+      throw handleException(e).throwIfInstance(MetaException.class, 
NoSuchObjectException.class)
+          .defaultMetaException();
     }
-    return ret;
   }
 
   @Override
   public GetPartitionResponse get_partition_req(GetPartitionRequest req)
       throws MetaException, NoSuchObjectException, TException {
-    // TODO Move the logic from get_partition to here, as that method is 
getting deprecated
-    String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), 
req.getDbName(), conf);
-    Partition p = get_partition_core(dbName, req.getTblName(), 
req.getPartVals());
-    GetPartitionResponse res = new GetPartitionResponse();
-    res.setPartition(p);
-    return res;
-  }
-
-  /**
-   * Fire a pre-event for read table operation, if there are any
-   * pre-event listeners registered
-   */
-  private void fireReadTablePreEvent(String catName, String dbName, String 
tblName)
-      throws MetaException, NoSuchObjectException {
-    if(preListeners.size() > 0) {
-      Supplier<Table> tableSupplier = Suppliers.memoize(new Supplier<Table>() {
-        @Override public Table get() {
-          try {
-            Table t = getMS().getTable(catName, dbName, tblName, null);
-            if (t == null) {
-              throw new NoSuchObjectException(TableName.getQualified(catName, 
dbName, tblName)
-                  + " table not found");
-            }
-            return t;
-          } catch(MetaException | NoSuchObjectException e) {
-            throw new RuntimeException(e);
-          }
-        }
-      });
-      firePreEvent(new PreReadTableEvent(tableSupplier, this));
-    }
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    TableName tableName = new TableName(catName, req.getDbName(), 
req.getTblName());
+    String partName = GetPartitionsHandler.validatePartVals(this, tableName, 
req.getPartVals());
+    GetPartitionsByNamesRequest gpnr = new 
GetPartitionsByNamesRequest(tableName.getDb(), tableName.getTable());
+    gpnr.setNames(List.of(partName));
+    List<Partition> partitions = GetPartitionsHandler.getPartitions(
+        t -> startTableFunction("get_partition_req", catName, t.getDb(), 
t.getTable()),
+        rex -> endFunction("get_partition_req",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, gpnr, true);
+    return new GetPartitionResponse(partitions.getFirst());
   }
 
   /**
@@ -3429,26 +3224,20 @@ public Partition get_partition_with_auth(final String 
db_name,
                                            final String user_name, final 
List<String> group_names)
       throws TException {
     String[] parsedDbName = parseDbName(db_name, conf);
-    startFunction("get_partition_with_auth",
-        " : tbl=" + TableName.getQualified(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name)
-            + samplePartitionValues(part_vals) + 
getGroupsCountAndUsername(user_name,group_names));
-    fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-    Partition ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-
-      ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-          tbl_name, part_vals, user_name, group_names);
-      ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      handleException(e).convertIfInstance(InvalidObjectException.class, 
NoSuchObjectException.class)
-          .rethrowException(e);
-    } finally {
-      endFunction("get_partition_with_auth", ret != null, ex, tbl_name);
-    }
-    return ret;
+    TableName tableName = new TableName(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
+    String partName = GetPartitionsHandler.validatePartVals(this, tableName, 
part_vals);
+    GetPartitionsPsWithAuthRequest gpar = new 
GetPartitionsPsWithAuthRequest(tableName.getDb(), tableName.getTable());
+    gpar.setCatName(tableName.getCat());
+    gpar.setUserName(user_name);
+    gpar.setGroupNames(group_names);
+    gpar.setPartNames(List.of(partName));
+    List<Partition> partitions = GetPartitionsHandler.getPartitions(
+        t ->  startFunction("get_partition_with_auth",
+            " : tbl=" + t + samplePartitionValues(part_vals) + 
getGroupsCountAndUsername(user_name,group_names)),
+        rex ->   endFunction("get_partition_with_auth",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tbl_name),
+        this, tableName, gpar, true);
+    return partitions.getFirst();
   }
 
   /**
@@ -3459,70 +3248,27 @@ public Partition get_partition_with_auth(final String 
db_name,
   @Deprecated
   public List<Partition> get_partitions(final String db_name, final String 
tbl_name,
       final short max_parts) throws NoSuchObjectException, MetaException {
-    return get_partitions(db_name, tbl_name,
-        new 
GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build());
-  }
-
-  private List<Partition> get_partitions(final String db_name, final String 
tbl_name,
-    GetPartitionsArgs args) throws NoSuchObjectException, MetaException {
-    String[] parsedDbName = parseDbName(db_name, conf);
-    List<Partition> ret = null;
-    Exception ex = null;
-    try {
-      PartitionsRequest req = new PartitionsRequest(parsedDbName[DB_NAME], 
tbl_name);
-      req.setCatName(parsedDbName[CAT_NAME]);
-      req.setMaxParts((short)args.getMax());
-      req.setSkipColumnSchemaForPartition(false);
-      req.setIncludeParamKeyPattern(args.getIncludeParamKeyPattern());
-      req.setExcludeParamKeyPattern(args.getExcludeParamKeyPattern());
-      ret = get_partitions_req(req).getPartitions();
-    } catch (Exception e) {
-      ex = e;
-      throwMetaException(e);
-    }
-    return ret;
-
-  }
-
-  private List<Partition> get_partitions_core(final String db_name, final 
String tbl_name,
-      GetPartitionsArgs args) throws NoSuchObjectException, MetaException {
     String[] parsedDbName = parseDbName(db_name, conf);
-    startTableFunction("get_partitions_core", parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-    fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-    List<Partition> ret = null;
-    Exception ex = null;
-    try {
-      checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-          tbl_name, NO_FILTER_STRING, args.getMax());
-
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-
-      ret = getMS().getPartitions(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name, args);
-      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      throwMetaException(e);
-    } finally {
-      endFunction("get_partitions_core", ret != null, ex, tbl_name);
-    }
-    return ret;
-
+    TableName tableName = new TableName(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
+    return GetPartitionsHandler.getPartitions(
+        t -> startTableFunction("get_partitions", tableName.getCat(), 
tableName.getDb(), tableName.getTable()),
+        rex -> endFunction("get_partitions",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, 
GetPartitionsHandler.createPartitionsRequest(tableName, max_parts), false);
   }
 
   @Override
   public PartitionsResponse get_partitions_req(PartitionsRequest req)
       throws NoSuchObjectException, MetaException, TException {
-    String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), 
req.getDbName(), conf);
-    List<Partition> partitions = get_partitions_core(dbName, req.getTblName(),
-            new GetPartitionsArgs.GetPartitionsArgsBuilder()
-            .max(req.getMaxParts())
-            .includeParamKeyPattern(req.getIncludeParamKeyPattern())
-            .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
-            .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
-            .build());
-    PartitionsResponse res = new PartitionsResponse();
-    res.setPartitions(partitions);
-    return res;
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    TableName tableName = new TableName(catName, req.getDbName(), 
req.getTblName());
+    List<Partition> partitions = GetPartitionsHandler.getPartitions(
+        t -> startTableFunction("get_partitions_req", catName, 
req.getDbName(), req.getTblName()),
+        rex ->  endFunction("get_partitions_req",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(),
+            TableName.getQualified(catName, req.getDbName(), 
req.getTblName())),
+        this, tableName, req, false);
+    return new PartitionsResponse(partitions);
   }
 
   @Override
@@ -3530,45 +3276,14 @@ public PartitionsResponse 
get_partitions_req(PartitionsRequest req)
   public List<Partition> get_partitions_with_auth(final String dbName,
       final String tblName, final short maxParts, final String userName,
       final List<String> groupNames) throws TException {
-    return get_partitions_ps_with_auth(dbName, tblName,
-        new GetPartitionsArgs.GetPartitionsArgsBuilder()
-            .max(maxParts).userName(userName).groupNames(groupNames)
-            .build());
-  }
-
-  private void checkLimitNumberOfPartitionsByFilter(String catName, String 
dbName,
-                                                    String tblName, String 
filterString,
-                                                    int requestMax) throws 
TException {
-    if (exceedsPartitionFetchLimit(requestMax)) {
-      checkLimitNumberOfPartitions(tblName, 
get_num_partitions_by_filter(prependCatalogToDbName(
-          catName, dbName, conf), tblName, filterString));
-    }
-  }
-
-  private void checkLimitNumberOfPartitionsByPs(String catName, String dbName, 
String tblName,
-                                                List<String> partVals, int 
requestMax)
-          throws TException {
-    if (exceedsPartitionFetchLimit(requestMax)) {
-      checkLimitNumberOfPartitions(tblName, getNumPartitionsByPs(catName, 
dbName, tblName,
-              partVals));
-    }
-  }
-
-  // Check input count exceeding partition limit iff:
-  //  1. partition limit is enabled.
-  //  2. input count is greater than the limit.
-  private boolean exceedsPartitionFetchLimit(int count) {
-    int partitionLimit = MetastoreConf.getIntVar(conf, 
ConfVars.LIMIT_PARTITION_REQUEST);
-    return partitionLimit > -1 && (count < 0 || count > partitionLimit);
-  }
-
-  private void checkLimitNumberOfPartitions(String tblName, int numPartitions) 
throws MetaException {
-    if (exceedsPartitionFetchLimit(numPartitions)) {
-      int partitionLimit = MetastoreConf.getIntVar(conf, 
ConfVars.LIMIT_PARTITION_REQUEST);
-      String configName = ConfVars.LIMIT_PARTITION_REQUEST.toString();
-      throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, 
numPartitions,
-          tblName, partitionLimit, configName));
-    }
+    String[] parsedDbName = parseDbName(dbName, conf);
+    TableName tableName = new TableName(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tblName);
+    GetPartitionsPsWithAuthRequest getAuthReq = new 
GetPartitionsPsWithAuthRequest(tableName.getDb(), tableName.getTable());
+    getAuthReq.setCatName(tableName.getCat());
+    getAuthReq.setUserName(userName);
+    getAuthReq.setGroupNames(groupNames);
+    getAuthReq.setMaxParts(maxParts);
+    return get_partitions_ps_with_auth_req(getAuthReq).getPartitions();
   }
 
   @Override
@@ -3589,9 +3304,10 @@ public List<PartitionSpec> get_partitions_pspec(final 
String db_name, final Stri
       getTableRequest.setCatName(catName);
       Table table = get_table_core(getTableRequest);
       // get_partitions will parse out the catalog and db names itself
-      List<Partition> partitions = get_partitions(db_name, tableName,
-          new 
GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build());
-
+      TableName t = new TableName(catName, dbName, tableName);
+      GetPartitionsHandler<PartitionsRequest, Partition> getPartitionsHandler 
= AbstractRequestHandler.offer(this,
+          new 
GetPartitionsHandler.GetPartitionsRequest<>(GetPartitionsHandler.createPartitionsRequest(t,
 max_parts), t));
+      List<Partition> partitions  = getPartitionsHandler.getResult().result();
       if (is_partition_spec_grouping_enabled(table)) {
         partitionSpecs = MetaStoreServerUtils
             .getPartitionspecsGroupedByStorageDescriptor(table, partitions);
@@ -3607,6 +3323,8 @@ public List<PartitionSpec> get_partitions_pspec(final 
String db_name, final Stri
       }
 
       return partitionSpecs;
+    } catch (Exception e) {
+      throw handleException(e).throwIfInstance(NoSuchObjectException.class, 
MetaException.class).defaultMetaException();
     }
     finally {
       endFunction("get_partitions_pspec", partitionSpecs != null && 
!partitionSpecs.isEmpty(), null, tbl_name);
@@ -3672,70 +3390,62 @@ private static boolean 
is_partition_spec_grouping_enabled(Table table) {
   public List<String> get_partition_names(final String db_name, final String 
tbl_name,
                                           final short max_parts) throws 
NoSuchObjectException, MetaException {
     String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partition_names", parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name, max_parts);
-    fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-    List<String> ret = null;
-    Exception ex = null;
-    try {
-      PartitionsRequest partitionReq = new 
PartitionsRequest(parsedDbName[DB_NAME], tbl_name);
-      partitionReq.setCatName(parsedDbName[CAT_NAME]);
-      partitionReq.setMaxParts(max_parts);
-      ret = fetch_partition_names_req(partitionReq);
-    } catch (Exception e) {
-      ex = e;
-      throw newMetaException(e);
-    } finally {
-      endFunction("get_partition_names", ret != null, ex, tbl_name);
-    }
-    return ret;
+    PartitionsRequest partitionReq = new 
PartitionsRequest(parsedDbName[DB_NAME], tbl_name);
+    partitionReq.setCatName(parsedDbName[CAT_NAME]);
+    partitionReq.setMaxParts(max_parts);
+    return fetch_partition_names_req(partitionReq);
   }
 
   @Override
-  public List<String> fetch_partition_names_req(final PartitionsRequest 
partitionReq)
+  public List<String> fetch_partition_names_req(final PartitionsRequest req)
       throws NoSuchObjectException, MetaException {
-    String catName = partitionReq.getCatName();
-    String dbName = partitionReq.getDbName();
-    String tbl_name = partitionReq.getTblName();
-    startTableFunction("fetch_partition_names_req", catName, dbName, tbl_name);
-    fireReadTablePreEvent(catName, dbName, tbl_name);
-    List<String> ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(catName, dbName, tbl_name);
-      ret = getMS().listPartitionNames(catName, dbName, tbl_name, 
partitionReq.getMaxParts());
-      ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled,
-              filterHook, catName, dbName, tbl_name, ret);
-    } catch (Exception e) {
-      ex = e;
-      throw newMetaException(e);
-    } finally {
-      endFunction("fetch_partition_names_req", ret != null, ex, tbl_name);
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    String dbName = req.getDbName(), tblName = req.getTblName();
+    TableName tableName = new TableName(catName, dbName, tblName);
+    try {
+      return GetPartitionsHandler.getPartitionNames(
+          t -> startTableFunction("fetch_partition_names_req", catName, 
dbName, tblName),
+          rex -> endFunction("fetch_partition_names_req",
+              rex.getLeft() != null && rex.getLeft().success(), 
rex.getRight(), tableName.toString()),
+          this, tableName, req).result();
+    } catch (TException ex) {
+      if (ex instanceof NoSuchObjectException e) {
+        // Kept because some tests in TestListPartitions expect NoSuchObjectException
+        // when the input is obviously invalid.
+        if (StringUtils.isBlank(dbName) || 
StringUtils.isBlank(tableName.getTable())) {
+          throw e;
+        }
+        return Collections.emptyList();
+      }
+      throw handleException(ex).defaultMetaException();
     }
-    return ret;
   }
 
   @Override
   public PartitionValuesResponse get_partition_values(PartitionValuesRequest 
request)
       throws MetaException {
     String catName = request.isSetCatName() ? request.getCatName() : 
getDefaultCatalog(conf);
-    String dbName = request.getDbName();
-    String tblName = request.getTblName();
+    TableName tableName = new TableName(catName, request.getDbName(), 
request.getTblName());
     long maxParts = request.getMaxParts();
     String filter = request.isSetFilter() ? request.getFilter() : "";
-    startPartitionFunction("get_partition_values", catName, dbName, tblName, 
(int) maxParts, filter);
+    GetPartitionsHandler.GetPartitionsRequest<PartitionValuesRequest> 
getPartitionsRequest =
+        new GetPartitionsHandler.GetPartitionsRequest<>(request, tableName);
+    startPartitionFunction("get_partition_values", catName, tableName.getDb(), 
tableName.getTable(),
+        (int) maxParts, filter);
+    Exception ex = null;
     try {
-      authorizeTableForPartitionMetadata(catName, dbName, tblName);
-
-      // This is serious black magic, as the following 2 lines do nothing 
AFAICT but without them
-      // the subsequent call to listPartitionValues fails.
-      List<FieldSchema> partCols = new ArrayList<FieldSchema>();
-      partCols.add(request.getPartitionKeys().get(0));
-      return getMS().listPartitionValues(catName, dbName, tblName, 
request.getPartitionKeys(),
-          request.isApplyDistinct(), request.getFilter(), 
request.isAscending(),
-          request.getPartitionOrder(), request.getMaxParts());
-    } catch (NoSuchObjectException e) {
-      LOG.error(String.format("Unable to get partition for %s.%s.%s", catName, 
dbName, tblName), e);
-      throw new MetaException(e.getMessage());
+      GetPartitionsHandler<PartitionValuesRequest, PartitionValuesResponse> 
getPartitionsHandler =
+          AbstractRequestHandler.offer(this, getPartitionsRequest);
+      List<PartitionValuesResponse> resps = 
getPartitionsHandler.getResult().result();
+      if (resps == null || resps.isEmpty()) {
+        throw new MetaException(String.format("Unable to get partition for 
%s", tableName.toString()));
+      }
+      return resps.getFirst();
+    } catch (Exception e) {
+      ex = e;
+      throw 
handleException(e).throwIfInstance(MetaException.class).defaultMetaException();
+    } finally {
+      endFunction("get_partition_values", ex == null, ex, 
tableName.toString());
     }
   }
 
@@ -4452,7 +4162,7 @@ public String get_config_value(String name, String 
defaultValue)
       return toReturn;
     } catch (Exception e) {
       ex = e;
-      throw 
handleException(e).throwIfInstance(TException.class).defaultMetaException();
+      throw handleException(e).defaultTException();
     } finally {
       endFunction("get_config_value", success, ex);
     }
@@ -4476,59 +4186,23 @@ public static List<String> getPartValsFromName(Table t, 
String partName)
     return partVals;
   }
 
-  private List<String> getPartValsFromName(RawStore ms, String catName, String 
dbName,
-                                           String tblName, String partName)
-      throws MetaException, InvalidObjectException {
-    Table t = ms.getTable(catName, dbName, tblName,  null);
-    if (t == null) {
-      throw new InvalidObjectException(dbName + "." + tblName
-          + " table not found");
-    }
-    return getPartValsFromName(t, partName);
-  }
-
-  private Partition get_partition_by_name_core(final RawStore ms, final String 
catName,
-                                               final String db_name, final 
String tbl_name,
-                                               final String part_name) throws 
TException {
-    fireReadTablePreEvent(catName, db_name, tbl_name);
-    List<String> partVals;
-    try {
-      partVals = getPartValsFromName(ms, catName, db_name, tbl_name, 
part_name);
-    } catch (InvalidObjectException e) {
-      throw new NoSuchObjectException(e.getMessage());
-    }
-    Partition p = ms.getPartition(catName, db_name, tbl_name, partVals);
-    p = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, 
filterHook, p);
-
-    if (p == null) {
-      throw new NoSuchObjectException(TableName.getQualified(catName, db_name, 
tbl_name)
-          + " partition (" + part_name + ") not found");
-    }
-    return p;
-  }
-
   @Override
   @Deprecated
   public Partition get_partition_by_name(final String db_name, final String 
tbl_name,
                                          final String part_name) throws 
TException {
-
-    String[] parsedDbName = parseDbName(db_name, conf);
-    startFunction("get_partition_by_name", ": tbl=" +
-        TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name)
-        + " part=" + part_name);
-    Partition ret = null;
-    Exception ex = null;
-    try {
-      ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME],
-          parsedDbName[DB_NAME], tbl_name, part_name);
-      ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partition_by_name", ret != null, ex, tbl_name);
+    if (StringUtils.isBlank(part_name)) {
+      throw new MetaException("The part_name in get_partition_by_name cannot 
be null or empty");
     }
-    return ret;
+    String[] parsedDbName = parseDbName(db_name, conf);
+    TableName tableName = new TableName(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
+    GetPartitionsByNamesRequest gpnr = new 
GetPartitionsByNamesRequest(tableName.getDb(), tableName.getTable());
+    gpnr.setNames(List.of(part_name));
+    List<Partition> partitions = GetPartitionsHandler.getPartitions(
+        t ->  startFunction("get_partition_by_name", ": tbl=" + t + " part=" + 
part_name),
+        rex -> endFunction("get_partition_by_name",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, gpnr, true);
+    return partitions.getFirst();
   }
 
   @Deprecated
@@ -4544,25 +4218,13 @@ public Partition 
append_partition_by_name_with_environment_context(final String
       final String tbl_name, final String part_name, final EnvironmentContext 
env_context)
       throws TException {
     String[] parsedDbName = parseDbName(db_name, conf);
-    Partition ret = null;
-    Exception ex = null;
-    try {
-      AppendPartitionsRequest appendPartitionRequest = new 
AppendPartitionsRequest();
-      appendPartitionRequest.setDbName(parsedDbName[DB_NAME]);
-      appendPartitionRequest.setTableName(tbl_name);
-      appendPartitionRequest.setName(part_name);
-      appendPartitionRequest.setCatalogName(parsedDbName[CAT_NAME]);
-      appendPartitionRequest.setEnvironmentContext(env_context);
-      ret = append_partition_req(appendPartitionRequest);
-    } catch (Exception e) {
-      ex = e;
-      throw handleException(e)
-          .throwIfInstance(InvalidObjectException.class, 
AlreadyExistsException.class, MetaException.class)
-          .defaultMetaException();
-    } finally {
-      endFunction("append_partition_by_name", ret != null, ex, tbl_name);
-    }
-    return ret;
+    AppendPartitionsRequest appendPartitionRequest = new 
AppendPartitionsRequest();
+    appendPartitionRequest.setDbName(parsedDbName[DB_NAME]);
+    appendPartitionRequest.setTableName(tbl_name);
+    appendPartitionRequest.setName(part_name);
+    appendPartitionRequest.setCatalogName(parsedDbName[CAT_NAME]);
+    appendPartitionRequest.setEnvironmentContext(env_context);
+    return append_partition_req(appendPartitionRequest);
   }
 
   @Deprecated
@@ -4592,27 +4254,7 @@ public boolean 
drop_partition_by_name_with_environment_context(final String db_n
   public List<Partition> get_partitions_ps(final String db_name,
                                            final String tbl_name, final 
List<String> part_vals,
                                            final short max_parts) throws 
TException {
-    String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name, max_parts,
-        part_vals);
-
-    List<Partition> ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-      // Don't send the parsedDbName, as this method will parse itself.
-      ret = get_partitions_ps_with_auth(db_name, tbl_name, new 
GetPartitionsArgs.GetPartitionsArgsBuilder()
-          .part_vals(part_vals).max(max_parts)
-          .build());
-      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partitions_ps", ret != null, ex, tbl_name);
-    }
-
-    return ret;
+    return get_partitions_ps_with_auth(db_name, tbl_name, part_vals, 
max_parts, null, null);
   }
 
   /**
@@ -4625,56 +4267,28 @@ public List<Partition> 
get_partitions_ps_with_auth(final String db_name,
       final String tbl_name, final List<String> part_vals,
       final short max_parts, final String userName,
       final List<String> groupNames) throws TException {
-    return get_partitions_ps_with_auth(db_name, tbl_name, new 
GetPartitionsArgs.GetPartitionsArgsBuilder()
-            
.part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames)
-            .build());
-  }
-
-  private List<Partition> get_partitions_ps_with_auth(final String db_name,
-      final String tbl_name, GetPartitionsArgs args) throws TException {
     String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partitions_ps_with_auth", 
parsedDbName[CAT_NAME],
-        parsedDbName[DB_NAME], tbl_name, args.getPart_vals());
-    fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-    List<Partition> ret = null;
-    Exception ex = null;
-    try {
-      if (args.getPart_vals() != null) {
-        checkLimitNumberOfPartitionsByPs(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-            tbl_name, args.getPart_vals(), args.getMax());
-      } else {
-        checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-            tbl_name, NO_FILTER_STRING, args.getMax());
-      }
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-      ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-          tbl_name, args);
-      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      handleException(e).convertIfInstance(InvalidObjectException.class, 
MetaException.class).rethrowException(e);
-    } finally {
-      endFunction("get_partitions_ps_with_auth", ret != null, ex, tbl_name);
-    }
-    return ret;
+    TableName tableName = new TableName(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
+    GetPartitionsPsWithAuthRequest getAuthReq = new 
GetPartitionsPsWithAuthRequest(tableName.getDb(), tableName.getTable());
+    getAuthReq.setCatName(tableName.getCat());
+    getAuthReq.setMaxParts(max_parts);
+    getAuthReq.setUserName(userName);
+    getAuthReq.setGroupNames(groupNames);
+    getAuthReq.setPartVals(part_vals);
+    return get_partitions_ps_with_auth_req(getAuthReq).getPartitions();
   }
 
   @Override
   public GetPartitionsPsWithAuthResponse 
get_partitions_ps_with_auth_req(GetPartitionsPsWithAuthRequest req)
       throws MetaException, NoSuchObjectException, TException {
-    String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), 
req.getDbName(), conf);
-    List<Partition> partitions =
-        get_partitions_ps_with_auth(dbName, req.getTblName(), new 
GetPartitionsArgs.GetPartitionsArgsBuilder()
-            .part_vals(req.getPartVals()).max(req.getMaxParts())
-            .userName(req.getUserName()).groupNames(req.getGroupNames())
-            .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
-            .includeParamKeyPattern(req.getIncludeParamKeyPattern())
-            .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
-            .partNames(req.getPartNames())
-            .build());
-    GetPartitionsPsWithAuthResponse res = new 
GetPartitionsPsWithAuthResponse();
-    res.setPartitions(partitions);
-    return res;
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    TableName tableName = new TableName(catName, req.getDbName(), 
req.getTblName());
+    List<Partition> partitions = GetPartitionsHandler.getPartitionsResult(
+        t ->  startTableFunction("get_partitions_ps_with_auth_req", catName, 
t.getDb(), t.getTable()),
+        rex -> endFunction("get_partitions_ps_with_auth_req",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, req).result();
+    return new GetPartitionsPsWithAuthResponse(partitions);
   }
 
   /**
@@ -4707,36 +4321,23 @@ public List<String> get_partition_names_ps(final String 
db_name,
     return ret;
   }
 
-  private List<String> get_partition_names_ps_core(final String db_name,
-      final String tbl_name, final List<String> part_vals, final short 
max_parts)
-      throws TException {
-    String[] parsedDbName = parseDbName(db_name, conf);
-    startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tbl_name, part_vals);
-    fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name);
-    List<String> ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name);
-      ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tbl_name,
-          part_vals, max_parts);
-      ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled,
-          filterHook, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, 
ret);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partitions_names_ps", ret != null, ex, tbl_name);
-    }
-    return ret;
-  }
-
   @Override
   public GetPartitionNamesPsResponse 
get_partition_names_ps_req(GetPartitionNamesPsRequest req)
       throws MetaException, NoSuchObjectException, TException {
-    String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), 
req.getDbName(), conf);
-    List<String> names = get_partition_names_ps_core(dbName, req.getTblName(), 
req.getPartValues(),
-        req.getMaxParts());
+    if (req.getPartValues() == null) {
+      throw new MetaException("The partValues in GetPartitionNamesPsRequest is 
null");
+    }
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    String dbName = req.getDbName(), tblName = req.getTblName();
+    TableName tableName = new TableName(catName, dbName, tblName);
+    GetPartitionsPsWithAuthRequest gpar = new 
GetPartitionsPsWithAuthRequest(tableName.getDb(), tableName.getTable());
+    gpar.setMaxParts(req.getMaxParts());
+    gpar.setPartVals(req.getPartValues());
+    List<String> names = GetPartitionsHandler.getPartitionNames(
+        t -> startTableFunction("get_partition_names_ps_req", catName, dbName, 
tblName),
+        rex -> endFunction("get_partition_names_ps_req",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, gpar).result();
     GetPartitionNamesPsResponse res = new GetPartitionNamesPsResponse();
     res.setNames(names);
     return res;
@@ -4747,24 +4348,12 @@ public List<String> 
get_partition_names_req(PartitionsByExprRequest req)
       throws MetaException, NoSuchObjectException, TException {
     String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
     String dbName = req.getDbName(), tblName = req.getTblName();
-    startTableFunction("get_partition_names_req", catName,
-        dbName, tblName);
-    fireReadTablePreEvent(catName, dbName, tblName);
-    List<String> ret = null;
-    Exception ex = null;
-    try {
-      authorizeTableForPartitionMetadata(catName, dbName, tblName);
-      ret = getMS().listPartitionNames(catName, dbName, tblName,
-          req.getDefaultPartitionName(), req.getExpr(), req.getOrder(), 
req.getMaxParts());
-      ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled,
-          filterHook, catName, dbName, tblName, ret);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partition_names_req", ret != null, ex, tblName);
-    }
-    return ret;
+    TableName tableName = new TableName(catName, dbName, tblName);
+    return GetPartitionsHandler.getPartitionNames(
+        t -> startTableFunction("get_partition_names_req", catName, dbName, 
tblName),
+        rex -> endFunction("get_partition_names_req",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, req).result();
   }
 
   @Override
@@ -5108,50 +4697,22 @@ public List<Partition> get_partitions_by_filter(final 
String dbName, final Strin
                                                   final String filter, final 
short maxParts)
       throws TException {
     String[] parsedDbName = parseDbName(dbName, conf);
-    return get_partitions_by_filter_internal(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tblName,
-        new 
GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max(maxParts).build());
-  }
-
-  private List<Partition> get_partitions_by_filter_internal(final String 
catName,
-      final String dbName, final String tblName, GetPartitionsArgs args) 
throws TException {
-    startTableFunction("get_partitions_by_filter", catName, dbName,
-        tblName);
-    fireReadTablePreEvent(catName, dbName, tblName);
-    List<Partition> ret = null;
-    Exception ex = null;
-    RawStore rs = getMS();
-    try {
-      authorizeTableForPartitionMetadata(catName, dbName, tblName);
-      if (exceedsPartitionFetchLimit(args.getMax())) {
-        // Since partition limit is configured, we need fetch at most (limit + 
1) partition names
-        int max = MetastoreConf.getIntVar(conf, 
ConfVars.LIMIT_PARTITION_REQUEST) + 1;
-        args = new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).max(max).build();
-        List<String> partNames = rs.listPartitionNamesByFilter(catName, 
dbName, tblName, args);
-        checkLimitNumberOfPartitions(tblName, partNames.size());
-        ret = rs.getPartitionsByNames(catName, dbName, tblName,
-            new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build());
-      } else {
-        ret = rs.getPartitionsByFilter(catName, dbName, tblName, args);
-      }
-
-      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partitions_by_filter", ret != null, ex, tblName);
-    }
-    return ret;
+    GetPartitionsByFilterRequest gfr =
+        new GetPartitionsByFilterRequest(parsedDbName[DB_NAME], tblName, 
filter);
+    gfr.setMaxParts(maxParts);
+    gfr.setCatName(parsedDbName[CAT_NAME]);
+    return get_partitions_by_filter_req(gfr);
   }
 
+  @Override
   public List<Partition> 
get_partitions_by_filter_req(GetPartitionsByFilterRequest req) throws 
TException {
-    return get_partitions_by_filter_internal(req.getCatName(), 
req.getDbName(), req.getTblName(),
-        new GetPartitionsArgs.GetPartitionsArgsBuilder()
-            .filter(req.getFilter()).max(req.getMaxParts())
-            .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
-            .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
-            .includeParamKeyPattern(req.getIncludeParamKeyPattern())
-            .build());
+    String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
+    TableName tableName = new TableName(catName, req.getDbName(), 
req.getTblName());
+    return GetPartitionsHandler.getPartitionsResult(
+        t -> startTableFunction("get_partitions_by_filter", catName, 
t.getDb(), t.getTable()),
+        rex -> endFunction("get_partitions_by_filter",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, req).result();
   }
 
   @Override
@@ -5197,32 +4758,20 @@ public PartitionsSpecByExprResult 
get_partitions_spec_by_expr(
       PartitionsByExprRequest req) throws TException {
     String dbName = req.getDbName(), tblName = req.getTblName();
     String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
-    startTableFunction("get_partitions_spec_by_expr", catName, dbName, 
tblName);
-    fireReadTablePreEvent(catName, dbName, tblName);
-    PartitionsSpecByExprResult ret = null;
-    Exception ex = null;
-    try {
-      PartitionsByExprResult result = get_partitions_by_expr_internal(catName, 
dbName, tblName,
-          new GetPartitionsArgs.GetPartitionsArgsBuilder()
-              
.expr(req.getExpr()).max(req.getMaxParts()).defaultPartName(req.getDefaultPartitionName())
-              
.skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
-              .includeParamKeyPattern(req.getIncludeParamKeyPattern())
-              .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
-              .build());
-
-      GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName);
-      getTableRequest.setCatName(catName);
-      Table table = get_table_core(getTableRequest);
-      List<PartitionSpec> partitionSpecs =
-          
MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, 
result.getPartitions());
-      ret = new PartitionsSpecByExprResult(partitionSpecs, 
result.isHasUnknownPartitions());
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partitions_spec_by_expr", ret != null, ex, tblName);
-    }
-    return ret;
+    TableName tableName = new TableName(catName, dbName, tblName);
+    GetPartitionsHandler.GetPartitionsResult<Partition> result =
+        GetPartitionsHandler.getPartitionsResult(
+            t -> startTableFunction("get_partitions_spec_by_expr", catName, 
dbName, req.getTblName()),
+            rex ->  endFunction("get_partitions_spec_by_expr",
+                rex.getLeft() != null && rex.getLeft().success(), 
rex.getRight(),
+                TableName.getQualified(catName, req.getDbName(), 
req.getTblName())),
+            this, tableName, req);
+    GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName);
+    getTableRequest.setCatName(catName);
+    Table table = get_table_core(getTableRequest);
+    List<PartitionSpec> partitionSpecs =
+        
MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, 
result.result());
+    return new PartitionsSpecByExprResult(partitionSpecs, 
result.hasUnknownPartitions());
   }
 
   @Override
@@ -5230,46 +4779,15 @@ public PartitionsByExprResult get_partitions_by_expr(
       PartitionsByExprRequest req) throws TException {
     String dbName = req.getDbName(), tblName = req.getTblName();
     String catName = req.isSetCatName() ? req.getCatName() : 
getDefaultCatalog(conf);
-    String expr = req.isSetExpr() ? Arrays.toString((req.getExpr())) : "";
-    String defaultPartitionName = req.isSetDefaultPartitionName() ? 
req.getDefaultPartitionName() : "";
-    int maxParts = req.getMaxParts();
-    startPartitionFunction("get_partitions_by_expr", catName, dbName, tblName, 
maxParts, expr, defaultPartitionName);
-    fireReadTablePreEvent(catName, dbName, tblName);
-    PartitionsByExprResult ret = null;
-    Exception ex = null;
-    try {
-      ret = get_partitions_by_expr_internal(catName, dbName, tblName,
-          new GetPartitionsArgs.GetPartitionsArgsBuilder()
-              
.expr(req.getExpr()).defaultPartName(req.getDefaultPartitionName()).max(req.getMaxParts())
-              
.skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
-              .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
-              .includeParamKeyPattern(req.getIncludeParamKeyPattern())
-              .build());
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("get_partitions_by_expr", ret != null, ex, tblName);
-    }
-    return ret;
-  }
-
-  private PartitionsByExprResult get_partitions_by_expr_internal(
-      String catName, String dbName, String tblName, GetPartitionsArgs args) 
throws TException {
-    List<Partition> partitions = new LinkedList<>();
-    boolean hasUnknownPartitions = false;
-    RawStore rs = getMS();
-    if (exceedsPartitionFetchLimit(args.getMax())) {
-      // Since partition limit is configured, we need fetch at most (limit + 
1) partition names
-      int max = MetastoreConf.getIntVar(conf, 
ConfVars.LIMIT_PARTITION_REQUEST) + 1;
-      List<String> partNames = rs.listPartitionNames(catName, dbName, tblName, 
args.getDefaultPartName(), args.getExpr(), null, max);
-      checkLimitNumberOfPartitions(tblName, partNames.size());
-      partitions = rs.getPartitionsByNames(catName, dbName, tblName,
-          new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build());
-    } else {
-      hasUnknownPartitions = rs.getPartitionsByExpr(catName, dbName, tblName, 
partitions, args);
-    }
-    return new PartitionsByExprResult(partitions, hasUnknownPartitions);
+    TableName tableName = new TableName(catName, dbName, tblName);
+    GetPartitionsHandler.GetPartitionsResult<Partition> result =
+        GetPartitionsHandler.getPartitionsResult(
+            t -> startTableFunction("get_partitions_by_expr", catName, dbName, 
req.getTblName()),
+            rex ->  endFunction("get_partitions_by_expr",
+                rex.getLeft() != null && rex.getLeft().success(), 
rex.getRight(),
+                TableName.getQualified(catName, req.getDbName(), 
req.getTblName())),
+            this, tableName, req);
+    return new PartitionsByExprResult(result.result(), 
result.hasUnknownPartitions());
   }
 
   @Override
@@ -5299,116 +4817,33 @@ public int get_num_partitions_by_filter(final String 
dbName,
     return ret;
   }
 
-  private int getNumPartitionsByPs(final String catName, final String dbName,
-                                   final String tblName, List<String> partVals)
-          throws TException {
-    String[] parsedDbName = parseDbName(dbName, conf);
-    startTableFunction("getNumPartitionsByPs", parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tblName);
-
-    int ret = -1;
-    Exception ex = null;
-    try {
-      ret = getMS().getNumPartitionsByPs(catName, dbName, tblName, partVals);
-    } catch (Exception e) {
-      ex = e;
-      rethrowException(e);
-    } finally {
-      endFunction("getNumPartitionsByPs", ret != -1, ex, tblName);
-    }
-    return ret;
-  }
-
   @Override
   @Deprecated
   public List<Partition> get_partitions_by_names(final String dbName, final 
String tblName,
                                                  final List<String> partNames)
       throws TException {
-    return get_partitions_by_names(dbName, tblName, false, null, null, null,
-        new 
GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build());
+    if (partNames == null) {
+      throw new MetaException("The partNames is null");
+    }
+    GetPartitionsByNamesRequest request = new 
GetPartitionsByNamesRequest(dbName, tblName);
+    request.setNames(partNames);
+    return get_partitions_by_names_req(request).getPartitions();
   }
 
   @Override
   public GetPartitionsByNamesResult 
get_partitions_by_names_req(GetPartitionsByNamesRequest gpbnr)
       throws TException {
-    List<Partition> partitions = get_partitions_by_names(gpbnr.getDb_name(),
-        gpbnr.getTbl_name(),
-        gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), 
gpbnr.getEngine(),
-        gpbnr.getProcessorCapabilities(), gpbnr.getProcessorIdentifier(),
-        new GetPartitionsArgs.GetPartitionsArgsBuilder()
-            
.partNames(gpbnr.getNames()).skipColumnSchemaForPartition(gpbnr.isSkipColumnSchemaForPartition())
-            .excludeParamKeyPattern(gpbnr.getExcludeParamKeyPattern())
-            .includeParamKeyPattern(gpbnr.getIncludeParamKeyPattern())
-            .build());
-    GetPartitionsByNamesResult result = new 
GetPartitionsByNamesResult(partitions);
-    return result;
-  }
-
-  private List<Partition> get_partitions_by_names(final String dbName, final 
String tblName,
-      boolean getColStats, String engine,
-      List<String> processorCapabilities, String processorId,
-      GetPartitionsArgs args) throws TException {
-
-    String[] dbNameParts = parseDbName(dbName, conf);
-    String parsedCatName = dbNameParts[CAT_NAME];
-    String parsedDbName = dbNameParts[DB_NAME];
-    List<Partition> ret = null;
-    Table table = null;
-    Exception ex = null;
-    boolean success = false;
-    startTableFunction("get_partitions_by_names", parsedCatName, parsedDbName, 
tblName);
-    try {
-      getMS().openTransaction();
-      authorizeTableForPartitionMetadata(parsedCatName, parsedDbName, tblName);
-
-      fireReadTablePreEvent(parsedCatName, parsedDbName, tblName);
-
-      checkLimitNumberOfPartitions(tblName, args.getPartNames().size());
-      ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, 
args);
-      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
-      table = getTable(parsedCatName, parsedDbName, tblName);
-
-      // If requested add column statistics in each of the partition objects
-      if (getColStats) {
-        // Since each partition may have stats collected for different set of 
columns, we
-        // request them separately.
-        for (Partition part: ret) {
-          String partName = Warehouse.makePartName(table.getPartitionKeys(), 
part.getValues());
-          List<ColumnStatistics> partColStatsList =
-              getMS().getPartitionColumnStatistics(parsedCatName, 
parsedDbName, tblName,
-                  Collections.singletonList(partName),
-                  StatsSetupConst.getColumnsHavingStats(part.getParameters()),
-                  engine);
-          if (partColStatsList != null && !partColStatsList.isEmpty()) {
-            ColumnStatistics partColStats = partColStatsList.get(0);
-            if (partColStats != null) {
-              part.setColStats(partColStats);
-            }
-          }
-        }
-      }
-
-      if (processorCapabilities == null || processorCapabilities.size() == 0 ||
-          processorCapabilities.contains("MANAGERAWMETADATA")) {
-        LOG.info("Skipping translation for processor with " + processorId);
-      } else {
-        if (transformer != null) {
-          ret = transformer.transformPartitions(ret, table, 
processorCapabilities, processorId);
-        }
-      }
-      success = getMS().commitTransaction();
-    } catch (Exception e) {
-      ex = e;
-      throw handleException(e)
-          .throwIfInstance(MetaException.class, NoSuchObjectException.class, 
InvalidObjectException.class)
-          .defaultMetaException();
-    } finally {
-      if (!success) {
-        getMS().rollbackTransaction();
-      }
-      endFunction("get_partitions_by_names", ret != null, ex, tblName);
+    if (gpbnr.getNames() == null) {
+      throw new MetaException("The names in GetPartitionsByNamesRequest is 
null");
     }
-    return ret;
+    String[] dbNameParts = parseDbName(gpbnr.getDb_name(), conf);
+    TableName tableName = new TableName(dbNameParts[CAT_NAME], 
dbNameParts[DB_NAME], gpbnr.getTbl_name());
+    List<Partition> partitions = GetPartitionsHandler.getPartitionsResult(
+        t ->  startTableFunction("get_partitions_by_names", 
tableName.getCat(), tableName.getDb(), tableName.getTable()),
+        rex ->  endFunction("get_partitions_by_names",
+            rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), 
tableName.toString()),
+        this, tableName, gpbnr).result();
+    return new GetPartitionsByNamesResult(partitions);
   }
 
   /**
@@ -7086,22 +6521,6 @@ public boolean 
set_aggr_stats_for(SetPartitionsStatsRequest req) throws TExcepti
     }
   }
 
-  private Table getTable(String catName, String dbName, String tableName)
-      throws MetaException, InvalidObjectException {
-    return getTable(catName, dbName, tableName, null);
-  }
-
-  private Table getTable(String catName, String dbName, String tableName,
-                         String writeIdList)
-      throws MetaException, InvalidObjectException {
-    Table t = getMS().getTable(catName, dbName, tableName, writeIdList);
-    if (t == null) {
-      throw new InvalidObjectException(TableName.getQualified(catName, dbName, 
tableName)
-          + " table not found");
-    }
-    return t;
-  }
-
   @Override
   public NotificationEventResponse 
get_next_notification(NotificationEventRequest rqst)
       throws TException {
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 3d8c21f0755..7538ce896bf 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -110,4 +110,6 @@ DataConnector get_dataconnector_core(final String name)
   AbortCompactResponse abort_Compactions(AbortCompactionRequest rqst) throws 
TException;
 
   IMetaStoreMetadataTransformer getMetadataTransformer();
+
+  MetaStoreFilterHook getMetaFilterHook();
 }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
index 627e10ade3f..ed978d4a4ed 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
@@ -19,6 +19,13 @@
 
 import java.util.List;
 
+import org.apache.hadoop.hive.metastore.api.GetPartitionRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByFilterRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsRequest;
+
 public class GetPartitionsArgs {
   private String filter;
   private byte[] expr;
@@ -183,6 +190,51 @@ public GetPartitionsArgs build() {
     }
   }
 
+  public static GetPartitionsArgs from(GetPartitionsByNamesRequest gpbnr) {
+    return new GetPartitionsArgsBuilder().partNames(gpbnr.getNames())
+        .skipColumnSchemaForPartition(gpbnr.isSkipColumnSchemaForPartition())
+        .excludeParamKeyPattern(gpbnr.getExcludeParamKeyPattern())
+        .includeParamKeyPattern(gpbnr.getIncludeParamKeyPattern()).build();
+  }
+
+  public static GetPartitionsArgs from(GetPartitionsPsWithAuthRequest req) {
+    return new GetPartitionsArgsBuilder()
+        .part_vals(req.getPartVals()).max(req.getMaxParts())
+        .userName(req.getUserName()).groupNames(req.getGroupNames())
+        .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
+        .includeParamKeyPattern(req.getIncludeParamKeyPattern())
+        .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
+        .partNames(req.getPartNames()).build();
+  }
+
+  public static GetPartitionsArgs from(GetPartitionsByFilterRequest req) {
+    return new GetPartitionsArgsBuilder()
+        .filter(req.getFilter()).max(req.getMaxParts())
+        .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
+        .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
+        .includeParamKeyPattern(req.getIncludeParamKeyPattern()).build();
+  }
+
+  public static GetPartitionsArgs from(PartitionsByExprRequest req) {
+    return new GetPartitionsArgsBuilder()
+        
.expr(req.getExpr()).defaultPartName(req.getDefaultPartitionName()).max(req.getMaxParts())
+        .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
+        .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
+        .includeParamKeyPattern(req.getIncludeParamKeyPattern()).build();
+  }
+
+  public static GetPartitionsArgs from(PartitionsRequest req) {
+    return new GetPartitionsArgsBuilder()
+        .includeParamKeyPattern(req.getIncludeParamKeyPattern())
+        .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
+        .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition())
+        .max(req.getMaxParts()).build();
+  }
+
+  public static GetPartitionsArgs from(GetPartitionRequest req) {
+    return new GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build();
+  }
+
   public static GetPartitionsArgs getAllPartitions() {
     return new GetPartitionsArgsBuilder().max(-1).build();
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java
index d8d7316e876..1731166fa4d 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java
@@ -58,6 +58,7 @@
 import static 
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST;
 import static org.apache.hadoop.hive.metastore.utils.JavaUtils.newInstance;
 
+@SuppressWarnings("rawtypes")
 public abstract class AbstractRequestHandler<T extends TBase, A extends 
AbstractRequestHandler.Result> {
   private static final Logger LOG = 
LoggerFactory.getLogger(AbstractRequestHandler.class);
   private static final Map<String, AbstractRequestHandler> ID_TO_HANDLER = new 
ConcurrentHashMap<>();
@@ -185,10 +186,6 @@ public RequestStatus getRequestStatus() throws TException {
             protected A execute() throws TException, IOException {
               throw new UnsupportedOperationException();
             }
-            @Override
-            public String getMessagePrefix() {
-              throw new UnsupportedOperationException();
-            }
           };
         }
       }
@@ -213,7 +210,7 @@ public static <T extends AbstractRequestHandler> T 
offer(IHMSHandler handler, TB
   }
 
   public RequestStatus getRequestStatus() throws TException {
-    String logMsgPrefix = getMessagePrefix();
+    String logMsgPrefix = toString();
     if (future == null) {
       throw new IllegalStateException(logMsgPrefix + " hasn't started yet");
     }
@@ -271,7 +268,7 @@ public void cancelRequest() {
     if (!future.isDone()) {
       future.cancel(true);
       aborted.set(true);
-      LOG.warn("{} is still running, but a close signal is sent out", 
getMessagePrefix());
+      LOG.warn("{} is still running, but a close signal is sent out", this);
     }
     executor.shutdown();
   }
@@ -287,7 +284,7 @@ public final A getResult() throws TException {
     RequestStatus resp = getRequestStatus();
     if (!resp.finished) {
       throw new IllegalStateException("Result is un-available as " +
-          getMessagePrefix() + " is still running");
+          this + " is still running");
     }
     return (A) result;
   }
@@ -318,13 +315,6 @@ protected void afterExecute(A result) throws TException, 
IOException {
     request = null;
   }
 
-  /**
-   * Get the prefix for logging the message on polling the handler's status.
-   *
-   * @return message prefix
-   */
-  protected abstract String getMessagePrefix();
-
   /**
    * Get the handler's progress that will show at the client.
    *
@@ -350,7 +340,7 @@ private String getMetricAlias() {
 
   public void checkInterrupted() throws MetaException {
     if (aborted.get()) {
-      throw new MetaException(getMessagePrefix() + " has been interrupted");
+      throw new MetaException(this + " has been interrupted");
     }
   }
 
@@ -380,6 +370,11 @@ default Result shrinkIfNecessary() {
     }
   }
 
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + " [" + id + "]";
+  }
+
   private static boolean validateHandler(Class<? extends 
AbstractRequestHandler> clz) {
     if (Modifier.isAbstract(clz.getModifiers())) {
       return false;
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AddPartitionsHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AddPartitionsHandler.java
index 45355a36bb3..db7c03cca55 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AddPartitionsHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AddPartitionsHandler.java
@@ -543,7 +543,7 @@ private boolean createLocationForAddedPartition(
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "AddPartitionsHandler [" + id + "] -  Add partitions for " + 
tableName + ":";
   }
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java
new file mode 100644
index 00000000000..dd26a3ded60
--- /dev/null
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.handler;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.HMSHandler;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.AppendPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName;
+import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.canUpdateStats;
+import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.updatePartitionStatsFast;
+import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.validatePartitionNameCharacters;
+import static 
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static 
org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+
+@RequestHandler(requestBody = AppendPartitionsRequest.class)
+public class AppendPartitionHandler
+    extends AbstractRequestHandler<AppendPartitionsRequest, 
AppendPartitionHandler.AppendPartitionResult> {
+  private static final Logger LOG = 
LoggerFactory.getLogger(AppendPartitionHandler.class);
+  private RawStore ms;
+  private String catName;
+  private String dbName;
+  private String tableName;
+  private List<String> partVals;
+  private Table tbl;
+  private Warehouse wh;
+
+  AppendPartitionHandler(IHMSHandler handler, AppendPartitionsRequest request) 
{
+    super(handler, false, request);
+  }
+
+  @Override
+  protected void beforeExecute() throws TException, IOException {
+    List<String> part_vals = request.getPartVals();
+    dbName = normalizeIdentifier(request.getDbName());
+    catName = normalizeIdentifier(request.isSetCatalogName() ?
+        request.getCatalogName() : getDefaultCatalog(handler.getConf()));
+    tableName = normalizeIdentifier(request.getTableName());
+    String partName = request.getName();
+    if (partName == null && (part_vals == null || part_vals.isEmpty())) {
+      throw new MetaException("The partition values must not be null or 
empty.");
+    }
+
+    ms = handler.getMS();
+    wh = handler.getWh();
+    tbl = ms.getTable(catName, dbName, tableName,  null);
+    if (tbl == null) {
+      throw new InvalidObjectException(dbName + "." + tableName + " table not 
found");
+    }
+    if (tbl.getSd().getLocation() == null) {
+      throw new MetaException("Cannot append a partition to a view");
+    }
+    if (part_vals == null || part_vals.isEmpty()) {
+      // partition name is set, get partition vals and then append partition
+      part_vals = getPartValsFromName(tbl, partName);
+    }
+    this.partVals = part_vals;
+    Partition old_part;
+    try {
+      old_part = ms.getPartition(catName, dbName, tableName, partVals);
+    } catch (NoSuchObjectException e) {
+      // this means there is no existing partition
+      old_part = null;
+    }
+    if (old_part != null) {
+      throw new AlreadyExistsException("Partition already exists:" + 
part_vals);
+    }
+    LOG.debug("Append partition: {}", part_vals);
+    validatePartitionNameCharacters(partVals, handler.getConf());
+  }
+
+  @Override
+  protected AppendPartitionResult execute() throws TException, IOException {
+    Partition part = new Partition();
+    part.setCatName(catName);
+    part.setDbName(dbName);
+    part.setTableName(tableName);
+    part.setValues(partVals);
+
+    boolean success = false, madeDir = false;
+    Path partLocation = null;
+    Map<String, String> transactionalListenerResponses = 
Collections.emptyMap();
+    Database db = null;
+    try {
+      ms.openTransaction();
+      db = handler.get_database_core(catName, dbName);
+      ((HMSHandler) handler).firePreEvent(new PreAddPartitionEvent(tbl, part, 
handler));
+
+      part.setSd(tbl.getSd().deepCopy());
+      partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+          .makePartName(tbl.getPartitionKeys(), partVals));
+      part.getSd().setLocation(partLocation.toString());
+
+      if (!wh.isDir(partLocation)) {
+        if (!wh.mkdirs(partLocation)) {
+          throw new MetaException(partLocation
+              + " is not a directory or unable to create one");
+        }
+        madeDir = true;
+      }
+
+      // set create time
+      long time = System.currentTimeMillis() / 1000;
+      part.setCreateTime((int) time);
+      part.putToParameters(hive_metastoreConstants.DDL_TIME, 
Long.toString(time));
+      if (canUpdateStats(handler.getConf(), tbl)) {
+        updatePartitionStatsFast(part, tbl, wh, madeDir, false, 
request.getEnvironmentContext(), true);
+      }
+
+      if (ms.addPartition(part)) {
+        if (!handler.getTransactionalListeners().isEmpty()) {
+          transactionalListenerResponses =
+              
MetaStoreListenerNotifier.notifyEvent(handler.getTransactionalListeners(),
+                  EventMessage.EventType.ADD_PARTITION,
+                  new AddPartitionEvent(tbl, part, true, handler),
+                  request.getEnvironmentContext());
+        }
+
+        success = ms.commitTransaction();
+      }
+    } finally {
+      if (!success) {
+        ms.rollbackTransaction();
+        if (madeDir) {
+          wh.deleteDir(partLocation, false, 
ReplChangeManager.shouldEnableCm(db, tbl));
+        }
+      }
+
+      if (!handler.getListeners().isEmpty()) {
+        MetaStoreListenerNotifier.notifyEvent(handler.getListeners(),
+            EventMessage.EventType.ADD_PARTITION,
+            new AddPartitionEvent(tbl, part, success, handler),
+            request.getEnvironmentContext(),
+            transactionalListenerResponses, ms);
+      }
+    }
+    return new AppendPartitionResult(part, success);
+  }
+
+  @Override
+  public String toString() {
+    return "AppendPartitionHandler [" + id + "] -  Append partition for " +
+        TableName.getQualified(catName, dbName, tableName) + ":";
+  }
+
+  public record AppendPartitionResult(Partition partition, boolean success) 
implements Result {
+
+  }
+}
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateDatabaseHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateDatabaseHandler.java
index 1632090192c..523cd335bb2 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateDatabaseHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateDatabaseHandler.java
@@ -260,7 +260,7 @@ protected void afterExecute(CreateDatabaseResult result) 
throws TException, IOEx
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "CreateDatabaseHandler [" + id + "] -  Create database " + name + 
":";
   }
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateTableHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateTableHandler.java
index c0eb701c55f..cb66e9c0b2c 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateTableHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/CreateTableHandler.java
@@ -386,7 +386,7 @@ protected void beforeExecute() throws TException, 
IOException {
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "CreateTableHandler [" + id + "] -  create table for " +
         TableName.getQualified(tbl.getCatName(), tbl.getDbName(), 
tbl.getTableName()) + ":";
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropDatabaseHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropDatabaseHandler.java
index fd632470b42..1a980512160 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropDatabaseHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropDatabaseHandler.java
@@ -344,14 +344,14 @@ private List<Table> sortTablesToDrop() {
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "DropDatabaseHandler [" + id + "] -  Drop database " + name + ":";
   }
 
   @Override
   protected String getRequestProgress() {
     if (progress == null) {
-      return getMessagePrefix() + " hasn't started yet";
+      return this + " hasn't started yet";
     }
     return progress.get();
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropPartitionsHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropPartitionsHandler.java
index e2ac7088866..5d96539831c 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropPartitionsHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropPartitionsHandler.java
@@ -286,7 +286,7 @@ public int compareTo(PathAndDepth o) {
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "DropPartitionsHandler [" + id + "] -  Drop partitions from " + 
tableName + ":";
   }
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropTableHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropTableHandler.java
index 332ae04179b..bc78b0e6d06 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropTableHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropTableHandler.java
@@ -176,14 +176,14 @@ public void beforeExecute() throws TException, 
IOException {
   }
 
   @Override
-  public String getMessagePrefix() {
+  public String toString() {
     return "DropTableHandler [" + id + "] -  Drop table " + tableName + ":";
   }
 
   @Override
   public String getRequestProgress() {
     if (progress == null) {
-      return getMessagePrefix() + " hasn't started yet";
+      return this + " hasn't started yet";
     }
     return progress.get();
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java
new file mode 100644
index 00000000000..2dcb768e83e
--- /dev/null
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.handler;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.function.Consumer;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.HMSHandler;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.MetaStoreFilterHook;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByFilterRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+import org.apache.hadoop.hive.metastore.utils.FilterUtils;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.ExceptionHandler.handleException;
+import static 
org.apache.hadoop.hive.metastore.HMSHandler.PARTITION_NUMBER_EXCEED_LIMIT_MSG;
+import static 
org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+
+// Collect get partitions APIs together
+@SuppressWarnings({"unchecked", "rawtypes"})
+@RequestHandler(requestBody = GetPartitionsHandler.GetPartitionsRequest.class)
+public class GetPartitionsHandler<Req, T> extends 
AbstractRequestHandler<GetPartitionsHandler.GetPartitionsRequest<Req>,
+    GetPartitionsHandler.GetPartitionsResult<T>> {
+  private static final Logger LOG = 
LoggerFactory.getLogger(GetPartitionsHandler.class);
+  private static final String NO_FILTER_STRING = "";
+  private RawStore rs;
+  private String catName;
+  private String dbName;
+  private String tblName;
+  private Table table;
+  private Configuration conf;
+  private MetaStoreFilterHook filterHook;
+  private boolean isServerFilterEnabled;
+
+  GetPartitionsHandler(IHMSHandler handler, GetPartitionsRequest request) {
+    super(handler, false, request);
+  }
+
+  @Override
+  protected void beforeExecute() throws TException, IOException {
+    catName = normalizeIdentifier(request.getTableName().getCat());
+    dbName = normalizeIdentifier(request.getTableName().getDb());
+    tblName = normalizeIdentifier(request.getTableName().getTable());
+    conf = handler.getConf();
+    rs = handler.getMS();
+    filterHook = handler.getMetaFilterHook();
+    isServerFilterEnabled = filterHook != null;
+    GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName);
+    getTableRequest.setCatName(catName);
+    table = handler.get_table_core(getTableRequest);
+    ((HMSHandler) handler).firePreEvent(new PreReadTableEvent(table, handler));
+    authorizeTableForPartitionMetadata();
+
+    LOG.info("Starting to get {} of {}", request.isFetchPartNames() ? 
"partition names" : "partitions",
+        TableName.getQualified(catName, dbName, tblName));
+  }
+
+  @Override
+  protected GetPartitionsResult execute() throws TException, IOException {
+    Req req = request.getReq();
+    if (req instanceof PartitionValuesRequest pvq) {
+      return getPartitionValues(pvq);
+    } else if (req instanceof GetPartitionsByNamesRequest gpbr) {
+      return getPartitionsByNames(gpbr);
+    } else if (req instanceof PartitionsRequest pr) {
+      return getPartitions(pr);
+    } else if (req instanceof GetPartitionsByFilterRequest fpr) {
+      return getPartitionsByFilter(fpr);
+    } else if (req instanceof PartitionsByExprRequest pber) {
+      return getPartitionsByExpr(pber);
+    } else if (req instanceof GetPartitionsPsWithAuthRequest gpar) {
+      return getPartitionsByVals(gpar);
+    }
+    throw new UnsupportedOperationException("Not yet implemented");
+  }
+
+  private GetPartitionsResult 
getPartitionsByVals(GetPartitionsPsWithAuthRequest gpar) throws TException {
+    GetPartitionsArgs args = GetPartitionsArgs.from(gpar);
+    if (request.isFetchPartNames()) {
+      List<String> ret = rs.listPartitionNamesPs(catName, dbName, tblName,
+          args.getPart_vals(), (short) args.getMax());
+      return new GetPartitionsResult<>(ret, true);
+    } else {
+      List<Partition> ret;
+      if (args.getPart_vals() != null) {
+        checkLimitNumberOfPartitionsByPs(args.getPart_vals(), args.getMax());
+      } else {
+        checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax());
+      }
+      ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args);
+      return new GetPartitionsResult(ret, true);
+    }
+  }
+
+  private GetPartitionsResult getPartitionValues(PartitionValuesRequest pvq) 
throws MetaException {
+    PartitionValuesResponse resp = rs.listPartitionValues(catName, dbName, 
tblName, pvq.getPartitionKeys(),
+        pvq.isApplyDistinct(), pvq.getFilter(), pvq.isAscending(),
+        pvq.getPartitionOrder(), pvq.getMaxParts());
+    return new GetPartitionsResult<>(List.of(resp), true);
+  }
+
+  private void checkLimitNumberOfPartitionsByPs(List<String> partVals, int 
requestMax) throws TException {
+    if (exceedsPartitionFetchLimit(requestMax)) {
+      checkLimitNumberOfPartitions(tblName, rs.getNumPartitionsByPs(catName, 
dbName, tblName, partVals));
+    }
+  }
+
+  private GetPartitionsResult<Partition> 
getPartitionsByFilter(GetPartitionsByFilterRequest filterReq) throws TException 
{
+    List<Partition> ret;
+    GetPartitionsArgs args = GetPartitionsArgs.from(filterReq);
+    if (exceedsPartitionFetchLimit(args.getMax())) {
+      // Since partition limit is configured, we need fetch at most (limit + 
1) partition names
+      int max = MetastoreConf.getIntVar(conf, 
MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST) + 1;
+      args = new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).max(max).build();
+      List<String> partNames = rs.listPartitionNamesByFilter(catName, dbName, 
tblName, args);
+      checkLimitNumberOfPartitions(tblName, partNames.size());
+      ret = rs.getPartitionsByNames(catName, dbName, tblName,
+          new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build());
+    } else {
+      ret = rs.getPartitionsByFilter(catName, dbName, tblName, args);
+    }
+
+    return new GetPartitionsResult<>(ret, true);
+  }
+
+  /**
+   * Check if user can access the table associated with the partition. If not, 
then throw exception
+   * so user cannot access partitions associated with this table
+   * @throws NoSuchObjectException
+   * @throws MetaException
+   */
+  private void authorizeTableForPartitionMetadata() throws 
NoSuchObjectException, MetaException {
+    FilterUtils.checkDbAndTableFilters(
+        isServerFilterEnabled, filterHook, catName, dbName, tblName);
+  }
+
+  private GetPartitionsResult getPartitionsByNames(GetPartitionsByNamesRequest 
gpbr) throws TException {
+    List<Partition> ret = null;
+    boolean success = false;
+    rs.openTransaction();
+    try {
+      GetPartitionsArgs args = GetPartitionsArgs.from(gpbr);
+      checkLimitNumberOfPartitions(tblName, args.getPartNames().size());
+      ret = rs.getPartitionsByNames(catName, dbName, tblName, args);
+      ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
+
+      // If requested add column statistics in each of the partition objects
+      if (gpbr.isGet_col_stats()) {
+        // Since each partition may have stats collected for different set of 
columns, we
+        // request them separately.
+        for (Partition part: ret) {
+          String partName = Warehouse.makePartName(table.getPartitionKeys(), 
part.getValues());
+          List<ColumnStatistics> partColStatsList =
+              rs.getPartitionColumnStatistics(catName, dbName, tblName,
+                  Collections.singletonList(partName),
+                  StatsSetupConst.getColumnsHavingStats(part.getParameters()),
+                  gpbr.getEngine());
+          if (partColStatsList != null && !partColStatsList.isEmpty()) {
+            ColumnStatistics partColStats = partColStatsList.getFirst();
+            if (partColStats != null) {
+              part.setColStats(partColStats);
+            }
+          }
+        }
+      }
+
+      List<String> processorCapabilities = gpbr.getProcessorCapabilities();
+      if (processorCapabilities == null || processorCapabilities.isEmpty() ||
+          processorCapabilities.contains("MANAGERAWMETADATA")) {
+        LOG.info("Skipping translation for processor with {}", 
gpbr.getProcessorIdentifier());
+      } else {
+        if (handler.getMetadataTransformer() != null) {
+          ret = handler.getMetadataTransformer().transformPartitions(ret, 
table,
+              processorCapabilities, gpbr.getProcessorIdentifier());
+        }
+      }
+      success = rs.commitTransaction();
+    } finally {
+      if (!success) {
+        rs.rollbackTransaction();
+      }
+    }
+    return new GetPartitionsResult<>(ret, success);
+  }
+
+  private GetPartitionsResult getPartitions(PartitionsRequest pr) throws 
TException {
+    GetPartitionsArgs args = GetPartitionsArgs.from(pr);
+    if (request.isFetchPartNames()) {
+      List<String> ret = rs.listPartitionNames(catName, dbName, tblName, 
(short) args.getMax());
+      return new GetPartitionsResult<>(ret, true);
+    } else {
+      List<Partition> ret;
+      checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax());
+      ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args);
+      return new GetPartitionsResult<>(ret, true);
+    }
+  }
+
+  private void checkLimitNumberOfPartitionsByFilter(String filterString, int 
requestMax) throws TException {
+    if (exceedsPartitionFetchLimit(requestMax)) {
+      checkLimitNumberOfPartitions(tblName, 
rs.getNumPartitionsByFilter(catName, dbName, tblName, filterString));
+    }
+  }
+
+  private GetPartitionsResult getPartitionsByExpr(PartitionsByExprRequest 
pber) throws TException {
+    GetPartitionsArgs args = GetPartitionsArgs.from(pber);
+    if (request.isFetchPartNames()) {
+      List<String> ret = rs.listPartitionNames(catName, dbName, tblName,
+          args.getDefaultPartName(), args.getExpr(), pber.getOrder(), 
args.getMax());
+      return new GetPartitionsResult(ret, true);
+    } else {
+      List<Partition> partitions = new LinkedList<>();
+      boolean hasUnknownPartitions = false;
+      if (exceedsPartitionFetchLimit(args.getMax())) {
+        // Since partition limit is configured, we need fetch at most (limit + 
1) partition names
+        int max = MetastoreConf.getIntVar(handler.getConf(), 
MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST) + 1;
+        List<String> partNames = rs.listPartitionNames(catName, dbName, 
tblName, args.getDefaultPartName(),
+            args.getExpr(), null, max);
+        checkLimitNumberOfPartitions(tblName, partNames.size());
+        partitions = rs.getPartitionsByNames(catName, dbName, tblName,
+            new 
GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build());
+      } else {
+        hasUnknownPartitions = rs.getPartitionsByExpr(catName, dbName, 
tblName, partitions, args);
+      }
+      GetPartitionsResult result = new GetPartitionsResult<>(partitions, true);
+      result.setHasUnknownPartitions(hasUnknownPartitions);
+      return result;
+    }
+  }
+
+  // Check input count exceeding partition limit iff:
+  //  1. partition limit is enabled.
+  //  2. input count is greater than the limit.
+  private boolean exceedsPartitionFetchLimit(int count) {
+    int partitionLimit = MetastoreConf.getIntVar(conf, 
MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST);
+    return partitionLimit > -1 && (count < 0 || count > partitionLimit);
+  }
+
+  private void checkLimitNumberOfPartitions(String tblName, int numPartitions) 
throws MetaException {
+    if (exceedsPartitionFetchLimit(numPartitions)) {
+      int partitionLimit = MetastoreConf.getIntVar(conf, 
MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST);
+      String configName = 
MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST.toString();
+      throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, 
numPartitions,
+          tblName, partitionLimit, configName));
+    }
+  }
+
+  @Override
+  protected void afterExecute(GetPartitionsResult<T> result) throws 
TException, IOException {
+    if (result != null && result.success()) {
+      List ret = result.result();
+      if (request.isFetchPartNames()) {
+        ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled,
+            filterHook, catName, dbName, tblName, ret);
+      } else if (!(request.req instanceof PartitionValuesRequest) &&
+          !(request.req instanceof GetPartitionsByNamesRequest)) {
+        // GetPartitionsMethod.NAMES has already selected the result
+        ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, 
filterHook, ret);
+      }
+      result.setResult(ret);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "GetPartitionsHandler [" + id + "] -  Get partitions from " +
+        TableName.getQualified(catName, dbName, tblName) + ":";
+  }
+
+  public static class GetPartitionsResult<T> implements Result {
+    private List<T> result;
+    private final boolean success;
+    private boolean hasUnknownPartitions;
+
+    public GetPartitionsResult(List<T> getPartsResult, boolean success) {
+      this.result = getPartsResult;
+      this.success = success;
+    }
+
+    public void setHasUnknownPartitions(boolean unknownPartitions) {
+      this.hasUnknownPartitions = unknownPartitions;
+    }
+
+    public void setResult(List<T> result) {
+      this.result = result;
+    }
+
+    public boolean hasUnknownPartitions() {
+      return hasUnknownPartitions;
+    }
+
+    @Override
+    public boolean success() {
+      return success;
+    }
+
+    public List<T> result() {
+      return result;
+    }
+  }
+
+  public static class GetPartitionsRequest<Req> extends TAbstractBase {
+    private final TableName tableName;
+    private final boolean fetchPartNames;
+    private final Req req;
+
+    public GetPartitionsRequest(Req req, TableName tableName,
+        boolean fetchPartNames) {
+      this.tableName = tableName;
+      this.fetchPartNames = fetchPartNames;
+      this.req = req;
+    }
+
+    public GetPartitionsRequest(Req req, TableName tableName) {
+      this(req, tableName, false);
+    }
+
+    public Req getReq() {
+      return req;
+    }
+
+    public TableName getTableName() {
+      return tableName;
+    }
+
+    public boolean isFetchPartNames() {
+      return fetchPartNames;
+    }
+  }
+
+  public static <Req> List<Partition> getPartitions(Consumer<TableName> 
preHook,
+      Consumer<Pair<GetPartitionsResult, Exception>> postHook, IHMSHandler 
handler, TableName tableName,
+      Req req, boolean uniqPartition) throws NoSuchObjectException, 
MetaException {
+    Exception ex = null;
+    GetPartitionsResult result = null;
+    try {
+      GetPartitionsRequest getPartitionsRequest = new 
GetPartitionsRequest(req, tableName);
+      preHook.accept(tableName);
+      GetPartitionsHandler<Req, Partition> getPartsHandler =
+          AbstractRequestHandler.offer(handler, getPartitionsRequest);
+      result = getPartsHandler.getResult();
+      List<Partition> partitions = result.result();
+      if (uniqPartition) {
+        List<FieldSchema> partitionKeys = 
getPartsHandler.table.getPartitionKeys();
+        String requestPartName = null;
+        if (req instanceof GetPartitionsPsWithAuthRequest gpar) {
+          if (gpar.getPartNames() != null && !gpar.getPartNames().isEmpty()) {
+            requestPartName = gpar.getPartNames().getFirst();
+          } else {
+            requestPartName = Warehouse.makePartName(partitionKeys, 
gpar.getPartVals());
+          }
+        } else if (req instanceof GetPartitionsByNamesRequest gbnr) {
+          requestPartName = gbnr.getNames().getFirst();
+        }
+        if (partitions == null || partitions.isEmpty()) {
+          throw new NoSuchObjectException(tableName + " partition: " + 
requestPartName + " not found");
+        } else if (partitions.size() > 1) {
+          throw new MetaException(
+              "Expecting only one partition but more than one partitions are 
found.");
+        } else {
+          // Check ObjectStore getPartitionWithAuth
+          // We need to compare partition name with requested name since some 
DBs
+          // (like MySQL, Derby) considers 'a' = 'a ' whereas others like 
(Postgres,
+          // Oracle) doesn't exhibit this problem.
+          Partition partition = partitions.getFirst();
+          String partName = Warehouse.makePartName(partitionKeys, 
partition.getValues());
+          if (!partName.equals(requestPartName)) {
+            throw new MetaException("Expecting a partition with name " + 
requestPartName
+                + ", but metastore is returning a partition with name " + 
partName + ".");
+          }
+        }
+      }
+      return partitions;
+    } catch (Exception e) {
+      ex = e;
+      // Create a new dummy GetPartitionsResult for postHook to consume
+      result = new GetPartitionsResult(List.of(), false);
+      throw handleException(e).throwIfInstance(NoSuchObjectException.class, 
MetaException.class)
+          .defaultMetaException();
+    } finally {
+      postHook.accept(Pair.of(result, ex));
+    }
+  }
+
+  public static <Req> GetPartitionsResult<Partition> getPartitionsResult(
+      Consumer<TableName> preHook,
+      Consumer<Pair<GetPartitionsResult, Exception>> postHook,
+      IHMSHandler handler, TableName tableName, Req req) throws TException {
+    GetPartitionsResult result = null;
+    Exception ex = null;
+    try {
+      GetPartitionsRequest getPartitionsRequest = new 
GetPartitionsRequest(req, tableName);
+      preHook.accept(tableName);
+      GetPartitionsHandler<Req, Partition> getPartsHandler =
+          AbstractRequestHandler.offer(handler, getPartitionsRequest);
+      result = getPartsHandler.getResult();
+      return result;
+    } catch (Exception e) {
+      ex = e;
+      throw handleException(ex).defaultTException();
+    } finally {
+      postHook.accept(Pair.of(result, ex));
+    }
+  }
+
+  public static <Req> GetPartitionsResult<String> 
getPartitionNames(Consumer<TableName> preExecutor,
+      Consumer<Pair<GetPartitionsResult, Exception>> postConsumer, IHMSHandler 
handler, TableName tableName,
+      Req req) throws TException {
+    Exception ex = null;
+    GetPartitionsResult result = null;
+    try {
+      preExecutor.accept(tableName);
+      GetPartitionsRequest getPartitionsRequest = new 
GetPartitionsRequest(req, tableName, true);
+      GetPartitionsHandler<Req, String> getPartNamesHandler =
+          AbstractRequestHandler.offer(handler, getPartitionsRequest);
+      result = getPartNamesHandler.getResult();
+      return result;
+    } catch (Exception e) {
+      ex = e;
+      throw handleException(ex).defaultTException();
+    } finally {
+      postConsumer.accept(Pair.of(result, ex));
+    }
+  }
+
+  public static String validatePartVals(IHMSHandler handler,
+      TableName tableName, List<String> partVals) throws MetaException, 
NoSuchObjectException {
+    if (partVals == null || partVals.isEmpty()) {
+      throw new MetaException("The partVals is null or empty");
+    }
+    GetTableRequest request = new GetTableRequest(tableName.getDb(), 
tableName.getTable());
+    request.setCatName(tableName.getCat());
+    Table table = handler.get_table_core(request);
+    int size = table.getPartitionKeysSize();
+    if (size != partVals.size()) {
+      throw new MetaException("Unmatched partition values, partition keys 
size: " +
+          size + ", partition values size: " + partVals.size());
+    }
+    return Warehouse.makePartName(table.getPartitionKeys(), partVals);
+  }
+
+  public static PartitionsRequest createPartitionsRequest(TableName tableName, 
int max) {
+    PartitionsRequest pr = new PartitionsRequest(tableName.getDb(), 
tableName.getTable());
+    pr.setCatName(tableName.getCat());
+    pr.setMaxParts((short) max);
+    return pr;
+  }
+}
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/SetAggrStatsHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/SetAggrStatsHandler.java
index ff645e004cd..3b43d689553 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/SetAggrStatsHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/SetAggrStatsHandler.java
@@ -449,7 +449,7 @@ private void updatePartitionColStatsForOneBatch(Table tbl, 
Map<String, ColumnSta
 
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "SetAggrStatsHandler [" + id + "] -  aggregating stats for " +
         TableName.getQualified(catName, dbName, tableName) + ":";
   }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java
new file mode 100644
index 00000000000..b7779f92b79
--- /dev/null
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.handler;
+
+import org.apache.thrift.TBase;
+import org.apache.thrift.TException;
+import org.apache.thrift.TFieldIdEnum;
+import org.apache.thrift.protocol.TProtocol;
+
+public class TAbstractBase implements TBase {
+  @Override
+  public TFieldIdEnum fieldForId(int i) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean isSet(TFieldIdEnum tFieldIdEnum) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object getFieldValue(TFieldIdEnum tFieldIdEnum) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void setFieldValue(TFieldIdEnum tFieldIdEnum, Object o) {
+    throw new UnsupportedOperationException();
+  }
+  @Override
+  public TBase deepCopy() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void clear() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public int compareTo(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void read(TProtocol tProtocol) throws TException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void write(TProtocol tProtocol) throws TException {
+    throw new UnsupportedOperationException();
+  }
+}
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TruncateTableHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TruncateTableHandler.java
index a42807aadcd..723fcc45458 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TruncateTableHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TruncateTableHandler.java
@@ -286,7 +286,7 @@ public static void addTruncateBaseFile(Path location, long 
writeId, Configuratio
   }
 
   @Override
-  protected String getMessagePrefix() {
+  public String toString() {
     return "TruncateTableHandler [" + id + "] -  truncate table for " +
         TableName.getQualified(catName, dbName, table.getTableName()) + ":";
   }
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 9e52ca13b1f..939f09c3343 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -1147,7 +1147,7 @@ public void testRenamePartition() throws Throwable {
     }
   }
 
-  @Test(expected = InvalidObjectException.class)
+  @Test(expected = NoSuchObjectException.class)
   public void testDropTableFetchPartitions() throws Throwable {
     String dbName = "fetchPartitionsDb";
     String tblName = "fetchPartitionsTbl";
@@ -2570,8 +2570,7 @@ public void testPartitionFilter() throws Exception {
       me = e;
     }
     assertNotNull(me);
-    assertTrue("NoSuchObject exception", me.getMessage().contains(
-          "Specified catalog.database.table does not exist : 
hive.invdbname.invtablename"));
+    assertTrue("NoSuchObject exception", 
me.getMessage().contains("hive.invdbname.invtablename table not found"));
 
     client.dropTable(dbName, tblName);
     client.dropDatabase(dbName);
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java
index 09000732422..a083a50ae0b 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.junit.Before;
@@ -55,7 +55,7 @@ protected void initConf() {
     }
   }
 
-  @Test(expected = InvalidObjectException.class)
+  @Test(expected = NoSuchObjectException.class)
   public void test_get_partitions_by_names() throws Exception {
     hmsHandler.get_partitions_by_names("dbName", "tblName", 
Arrays.asList("partNames"));
   }
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
index b919eeffe25..fa0c422f471 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
@@ -125,7 +125,8 @@ public void testEndFunctionListener() throws Exception {
     e = context.getException();
     assertTrue((e!=null));
     assertTrue((e instanceof NoSuchObjectException));
-    assertEquals(context.getInputTableName(), tblName);
+    assertEquals(context.getInputTableName(),
+        Warehouse.DEFAULT_CATALOG_NAME + "." + dbName + "." + tblName);
     try {
       msc.dropTable(dbName, unknownTable);
     } catch (Exception e4) {
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java
index abaf28217ca..ec602d0b499 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java
@@ -1618,8 +1618,7 @@ public void testListPartitionValuesNullSchema() throws 
Exception {
               null);
       client.listPartitionValues(request);
       fail("Should have thrown exception");
-    } catch (NullPointerException | TProtocolException e) {
-      //TODO: should not throw different exceptions for different HMS 
deployment types
+    } catch (MetaException | TProtocolException e) {
     }
   }
 

Reply via email to