This is an automated email from the ASF dual-hosted git repository.

ngsg pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new bcfa7557cad HIVE-12679: Allow users to be able to specify an implementation of IMetaStoreClient via HiveConf (#5955)
bcfa7557cad is described below

commit bcfa7557cad599a457948e936bf75e560c5bd695
Author: Seonggon Namgung <[email protected]>
AuthorDate: Thu Jul 31 10:54:53 2025 +0900

    HIVE-12679: Allow users to be able to specify an implementation of IMetaStoreClient via HiveConf (#5955)
    
    This patch was originally submitted by Austin Lee.
    
    Co-authored-by: Noritaka Sekiyama <[email protected]>
    Co-authored-by: okumin <[email protected]>
---
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   | 173 ++++++++++-----------
 .../apache/hadoop/hive/ql/metadata/TestHive.java   |  34 ++++
 .../hadoop/hive/metastore/HiveMetaStoreClient.java |  64 +++++---
 .../client/ThriftHiveMetaStoreClient.java          |   7 +-
 .../client/builder/HiveMetaStoreClientBuilder.java |  96 +++++++-----
 .../hadoop/hive/metastore/conf/MetastoreConf.java  |   4 +
 6 files changed, 224 insertions(+), 154 deletions(-)
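
For readers who want to try the new knob: the patch resolves the metastore client class from the new metastore.client.impl property. Below is a minimal sketch of opting in programmatically, mirroring the new test further down; the implementation class name is a hypothetical placeholder, not part of this patch.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class CustomClientConfigExample {
      public static void main(String[] args) {
        // Any class implementing IMetaStoreClient with a (Configuration, boolean)
        // constructor can be named here; com.example.CustomMetaStoreClient is a
        // hypothetical placeholder.
        HiveConf conf = new HiveConf();
        MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL,
            "com.example.CustomMetaStoreClient");
      }
    }

The same value can also be set declaratively through the hive.metastore.client.impl key added to MetastoreConf at the end of this diff.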

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 0a783c81aaa..eec284a9327 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -21,68 +21,13 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE;
-import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE;
-import static org.apache.hadoop.hive.metastore.HiveMetaHook.HIVE_ICEBERG_STORAGE_HANDLER;
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.CTAS_LEGACY_CONFIG;
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
-import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.convertToGetPartitionsByNamesRequest;
-import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
-import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergStatsSource;
-import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergTable;
-import static org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName;
-import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.CALCITE;
-import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ALL;
-import static org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable;
-import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
-import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.Set;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-
-import javax.annotation.Nullable;
-import javax.jdo.JDODataStoreException;
-
-import com.google.common.collect.ImmutableList;
-
 import org.apache.commons.collections4.CollectionUtils;
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang3.ObjectUtils;
@@ -96,6 +41,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hive.common.DataCopyStatistics;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.MaterializationSnapshot;
@@ -104,41 +50,26 @@
 import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
-import org.apache.hadoop.hive.common.DataCopyStatistics;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
 import org.apache.hadoop.hive.common.type.SnapshotContext;
 import org.apache.hadoop.hive.conf.Constants;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
-import org.apache.hadoop.hive.metastore.api.CompactionRequest;
-import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
-import org.apache.hadoop.hive.metastore.api.GetFunctionsRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
-import org.apache.hadoop.hive.metastore.api.GetTableRequest;
-import org.apache.hadoop.hive.metastore.api.SourceTable;
-import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Batchable;
-import org.apache.hadoop.hive.metastore.client.builder.HiveMetaStoreClientBuilder;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.utils.RetryUtilities;
-import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
-import org.apache.hadoop.hive.ql.io.HdfsUtils;
-import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
+import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AbortCompactResponse;
+import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest;
+import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
@@ -148,8 +79,10 @@
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionResponse;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.CreateTableRequest;
 import org.apache.hadoop.hive.metastore.api.DataConnector;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
@@ -160,17 +93,21 @@
 import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetFunctionsRequest;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;
 import org.apache.hadoop.hive.metastore.api.GetPartitionRequest;
-import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
 import org.apache.hadoop.hive.metastore.api.GetPartitionResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
 import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
@@ -179,6 +116,7 @@
 import org.apache.hadoop.hive.metastore.api.Materialization;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionFilterMode;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
@@ -201,7 +139,9 @@
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.SourceTable;
 import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMMapping;
 import org.apache.hadoop.hive.metastore.api.WMNullablePool;
@@ -212,21 +152,25 @@
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest;
 import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
-import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest;
-import org.apache.hadoop.hive.metastore.api.AbortCompactResponse;
-import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.client.builder.HiveMetaStoreClientBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.RetryUtilities;
+import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.ddl.database.drop.DropDatabaseDesc;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
 import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.FunctionUtils;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.Utilities.PartitionDetails;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.AcidUtils.TableSnapshot;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.HdfsUtils;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -234,13 +178,13 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.AlterTableSnapshotRefSpec;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
+import org.apache.hadoop.hive.ql.parse.AlterTableSnapshotRefSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.Deserializer;
@@ -252,11 +196,63 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.HiveVersionInfo;
-import org.apache.thrift.TException;
 import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import javax.jdo.JDODataStoreException;
+
+import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE;
+import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_SCAN_SPECIFIC_PARTITIONS;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_WRITE_NOTIFICATION_MAX_BATCH_SIZE;
+import static org.apache.hadoop.hive.metastore.HiveMetaHook.HIVE_ICEBERG_STORAGE_HANDLER;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.CTAS_LEGACY_CONFIG;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.convertToGetPartitionsByNamesRequest;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergStatsSource;
+import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergTable;
+import static org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName;
+import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ALL;
+import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.CALCITE;
+import static org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable;
+import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
+import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
+
 /**
  * This class has functions that implement meta data/DDL operations using calls
  * to the metastore.
@@ -5985,10 +5981,9 @@ public List<Partition> exchangeTablePartitions(Map<String, String> partitionSpec
    * File based store support is removed
    *
    * @returns a Meta Store Client
-   * @throws HiveMetaException
+   * @throws MetaException
    *           if a working client can't be created
    */
-  @SuppressWarnings("squid:S2095")
   private IMetaStoreClient createMetaStoreClient(boolean allowEmbedded) throws MetaException {
 
     HiveMetaHookLoader hookLoader = new HiveMetaHookLoader() {
@@ -6002,7 +5997,7 @@ public HiveMetaHook getHook(
     };
 
     HiveMetaStoreClientBuilder msClientBuilder = new HiveMetaStoreClientBuilder(conf)
-        .newThriftClient(allowEmbedded)
+        .newClient(allowEmbedded)
         .enhanceWith(client ->
             HiveMetaStoreClientWithLocalCache.newClient(conf, client))
         .enhanceWith(client ->
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 4c0ab53ea2d..f722520d590 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.junit.Assert.assertThat;
 
 import java.io.OutputStream;
 import java.util.ArrayList;
@@ -40,6 +41,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
 import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -51,6 +53,7 @@
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
@@ -1122,6 +1125,37 @@ private String getFileCheckSum(FileSystem fileSystem, Path p) throws Exception {
     return "";
   }
 
+  @Test
+  public void testLoadingIMetaStoreClient() throws Throwable {
+    String clientClassName = ThriftHiveMetaStoreClient.class.getName();
+    HiveConf conf = new HiveConf();
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL, clientClassName);
+    // The current object was constructed in setUp() before we got here
+    // so clean that up so we can inject our own dummy implementation of IMetaStoreClient
+    Hive.closeCurrent();
+    Hive hive = Hive.get(conf);
+    IMetaStoreClient tmp = hive.getMSC();
+    assertNotNull("getMSC() failed.", tmp);
+  }
+
+  @Test
+  public void testLoadingInvalidIMetaStoreClient() throws Throwable {
+    // Intentionally invalid class
+    String clientClassName = String.class.getName();
+    HiveConf conf = new HiveConf();
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL, clientClassName);
+    // The current object was constructed in setUp() before we got here
+    // so clean that up so we can inject our own dummy implementation of IMetaStoreClient
+    Hive.closeCurrent();
+    Hive hive = Hive.get(conf);
+    try {
+      hive.getMSC();
+      fail("getMSC() was expected to throw MetaException.");
+    } catch (Exception e) {
+      assertTrue("getMSC() failed, which IS expected.", true);
+    }
+  }
+
   // shamelessly copied from Path in hadoop-2
   private static final String SEPARATOR = "/";
   private static final char SEPARATOR_CHAR = '/';
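
Because the builder instantiates the configured class reflectively through a (Configuration, boolean) constructor, a custom client only needs to expose that signature. A hedged sketch follows, extending the stock Thrift client rather than implementing IMetaStoreClient from scratch; the class name and the idea of adding custom wiring are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;

    // Hypothetical custom client named by metastore.client.impl.
    public class CustomMetaStoreClient extends ThriftHiveMetaStoreClient {
      // This (Configuration, boolean) constructor is the signature that
      // HiveMetaStoreClientBuilder looks up via reflection.
      public CustomMetaStoreClient(Configuration conf, boolean allowEmbedded)
          throws MetaException {
        super(conf, allowEmbedded);
        // Custom wiring (metrics, logging, request routing, ...) would go here.
      }
    }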
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 3bc5993e537..122c0c3c491 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -49,7 +49,7 @@ public class HiveMetaStoreClient extends MetaStoreClientWrapper implements IMeta
   public static final String MANUALLY_INITIATED_COMPACTION = "manual";
   public static final String RENAME_PARTITION_MAKE_COPY = "renamePartitionMakeCopy";
 
-  private final ThriftHiveMetaStoreClient thriftClient;
+  private ThriftHiveMetaStoreClient thriftClient = null;
 
   public HiveMetaStoreClient(Configuration conf) throws MetaException {
     this(conf, null, true);
@@ -61,19 +61,22 @@ public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) th
 
   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
     throws MetaException {
-    this(conf, hookLoader, new ThriftHiveMetaStoreClient(conf, allowEmbedded));
+    this(conf, hookLoader, new HiveMetaStoreClientBuilder(conf).newClient(allowEmbedded).build());
   }
 
   private HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader,
-      ThriftHiveMetaStoreClient thriftClient) {
-    super(createUnderlyingClient(conf, hookLoader, thriftClient), conf);
-    this.thriftClient = thriftClient;
+      IMetaStoreClient baseMetaStoreClient) {
+    super(createUnderlyingClient(conf, hookLoader, baseMetaStoreClient), conf);
+
+    if (baseMetaStoreClient instanceof ThriftHiveMetaStoreClient metaStoreClient) {
+      this.thriftClient = metaStoreClient;
+    }
   }
 
   private static IMetaStoreClient createUnderlyingClient(Configuration conf, HiveMetaHookLoader hookLoader,
-      ThriftHiveMetaStoreClient thriftClient) {
+      IMetaStoreClient baseMetaStoreClient) {
     return new HiveMetaStoreClientBuilder(conf)
-       .client(thriftClient)
+       .client(baseMetaStoreClient)
        .withHooks(hookLoader)
        .threadSafe()
        .build();
@@ -81,20 +84,33 @@ private static IMetaStoreClient createUnderlyingClient(Configuration conf, HiveM
 
   // methods for test
 
+  @FunctionalInterface
+  private interface ThriftCallable<T> {
+    T call() throws TException;
+  }
+
+  private <T> T doCall(ThriftCallable<T> callable) throws TException {
+    if (thriftClient != null) {
+      return callable.call();
+    } else {
+      throw new UnsupportedOperationException();
+    }
+  }
+
   public boolean createType(Type type) throws TException {
-    return thriftClient.createType(type);
+    return doCall(() -> thriftClient.createType(type));
   }
 
   public boolean dropType(String type) throws TException {
-    return thriftClient.dropType(type);
+    return doCall(() -> thriftClient.dropType(type));
   }
 
   public Type getType(String name) throws TException {
-    return thriftClient.getType(name);
+    return doCall(() -> thriftClient.getType(name));
   }
 
   public Map<String, Type> getTypeAll(String name) throws TException {
-    return thriftClient.getTypeAll(name);
+    return doCall(() -> thriftClient.getTypeAll(name));
   }
 
   public void createTable(Table tbl, EnvironmentContext envContext) throws TException {
@@ -107,56 +123,58 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws TExcept
 
   public Table getTable(String catName, String dbName, String tableName,
       boolean getColumnStats, String engine) throws TException {
-    return thriftClient.getTable(catName, dbName, tableName, getColumnStats, engine);
+    return doCall(() -> thriftClient.getTable(catName, dbName, tableName, getColumnStats, engine));
   }
 
   public void dropTable(String catName, String dbname, String name, boolean deleteData,
       boolean ignoreUnknownTab, EnvironmentContext envContext) throws TException {
-    thriftClient.dropTable(catName, dbname, name, deleteData, ignoreUnknownTab, envContext);
+    doCall(() -> {
+      thriftClient.dropTable(catName, dbname, name, deleteData, ignoreUnknownTab, envContext);
+      return null;
+    });
   }
 
   public Partition add_partition(Partition new_part, EnvironmentContext envContext) throws TException {
-    return thriftClient.add_partition(new_part, envContext);
+    return doCall(() -> thriftClient.add_partition(new_part, envContext));
   }
 
   public Partition appendPartition(String dbName, String tableName, List<String> partVals,
       EnvironmentContext ec) throws TException {
-    return thriftClient.appendPartition(dbName, tableName, partVals, ec);
+    return doCall(() -> thriftClient.appendPartition(dbName, tableName, partVals, ec));
   }
 
   public Partition appendPartitionByName(String dbName, String tableName, String partName) throws TException {
-    return thriftClient.appendPartitionByName(dbName, tableName, partName);
+    return doCall(() -> thriftClient.appendPartitionByName(dbName, tableName, partName));
   }
 
   public Partition appendPartitionByName(String dbName, String tableName, String partName,
       EnvironmentContext envContext) throws TException {
-    return thriftClient.appendPartitionByName(dbName, tableName, partName, envContext);
+    return doCall(() -> thriftClient.appendPartitionByName(dbName, tableName, partName, envContext));
   }
 
   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
       EnvironmentContext env_context) throws TException {
-    return thriftClient.dropPartition(db_name, tbl_name, part_vals, env_context);
+    return doCall(() -> thriftClient.dropPartition(db_name, tbl_name, part_vals, env_context));
   }
 
   public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData,
       EnvironmentContext ec) throws TException {
-    return thriftClient.dropPartition(dbName, tableName, partName, dropData, ec);
+    return doCall(() -> thriftClient.dropPartition(dbName, tableName, partName, dropData, ec));
   }
 
   public boolean dropPartition(String dbName, String tableName, List<String> partVals)
       throws TException {
-    return thriftClient.dropPartition(dbName, tableName, partVals);
+    return doCall(() -> thriftClient.dropPartition(dbName, tableName, partVals));
   }
 
   public boolean dropPartitionByName(String dbName, String tableName, String partName,
       boolean deleteData) throws TException {
-    return thriftClient.dropPartitionByName(dbName, tableName, partName, deleteData);
+    return doCall(() -> thriftClient.dropPartitionByName(dbName, tableName, partName, deleteData));
   }
 
   public boolean dropPartitionByName(String dbName, String tableName, String partName,
       boolean deleteData, EnvironmentContext envContext) throws TException {
-    return thriftClient.dropPartitionByName(dbName, tableName, partName, deleteData, envContext);
-
+    return doCall(() -> thriftClient.dropPartitionByName(dbName, tableName, partName, deleteData, envContext));
   }
   }
 
   @VisibleForTesting
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
index 3036b9889ef..a2aafd0f3da 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
@@ -155,12 +155,7 @@ public class ThriftHiveMetaStoreClient extends BaseMetaStoreClient {
 
   private static final Logger LOG = LoggerFactory.getLogger(ThriftHiveMetaStoreClient.class);
 
-  public static ThriftHiveMetaStoreClient newClient(Configuration conf, Boolean allowEmbedded)
-      throws MetaException {
-    return new ThriftHiveMetaStoreClient(conf, allowEmbedded);
-  }
-
-  public ThriftHiveMetaStoreClient(Configuration conf, Boolean allowEmbedded) throws MetaException {
+  public ThriftHiveMetaStoreClient(Configuration conf, boolean allowEmbedded) throws MetaException {
     super(conf);
     version =
         MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST) ? TEST_VERSION : DEFAULT_VERSION;
diff --git a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
index ab213e3ea6d..903a3543ee2 100644
--- a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
+++ b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
@@ -18,65 +18,89 @@
 
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.client.HookEnabledMetaStoreClient;
 import org.apache.hadoop.hive.metastore.client.SynchronizedMetaStoreClient;
 import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 import java.util.Objects;
 import java.util.function.Function;
 
 public class HiveMetaStoreClientBuilder {
+  private static final Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientBuilder.class);
 
-    private final Configuration conf;
-    private IMetaStoreClient client;
+  private final Configuration conf;
+  private IMetaStoreClient client;
 
-    public HiveMetaStoreClientBuilder(Configuration conf) {
-        this.conf = Objects.requireNonNull(conf);
-    }
+  public HiveMetaStoreClientBuilder(Configuration conf) {
+    this.conf = Objects.requireNonNull(conf);
+  }
 
-    public HiveMetaStoreClientBuilder newClient() throws MetaException {
-        this.client = new HiveMetaStoreClient(conf);
-        return this;
-    }
+  public HiveMetaStoreClientBuilder newClient(boolean allowEmbedded) throws MetaException {
+    this.client = createClient(conf, allowEmbedded);
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder newThriftClient(boolean allowEmbedded) throws MetaException {
-        this.client = ThriftHiveMetaStoreClient.newClient(conf, allowEmbedded);
-        return this;
-    }
+  public HiveMetaStoreClientBuilder client(IMetaStoreClient client) {
+    this.client = client;
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder client(IMetaStoreClient client) {
-        this.client = client;
-        return this;
-    }
+  public HiveMetaStoreClientBuilder enhanceWith(Function<IMetaStoreClient, IMetaStoreClient> wrapperFunction) {
+    client = wrapperFunction.apply(client);
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder enhanceWith(Function<IMetaStoreClient, IMetaStoreClient> wrapperFunction) {
-        client = wrapperFunction.apply(client);
-        return this;
-    }
+  public HiveMetaStoreClientBuilder withHooks(HiveMetaHookLoader hookLoader) {
+    this.client = HookEnabledMetaStoreClient.newClient(conf, hookLoader, client);
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder withHooks(HiveMetaHookLoader hookLoader) {
-        this.client = HookEnabledMetaStoreClient.newClient(conf, hookLoader, client);
-        return this;
-    }
+  public HiveMetaStoreClientBuilder withRetry(Map<String, Long> metaCallTimeMap) throws MetaException {
+    client = RetryingMetaStoreClient.getProxy(conf, metaCallTimeMap, client);
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder withRetry(Map<String, Long> metaCallTimeMap) throws MetaException {
-        client = RetryingMetaStoreClient.getProxy(conf, metaCallTimeMap, client);
-        return this;
-    }
+  public HiveMetaStoreClientBuilder threadSafe() {
+    this.client = SynchronizedMetaStoreClient.newClient(conf, client);
+    return this;
+  }
 
-    public HiveMetaStoreClientBuilder threadSafe() {
-        this.client = SynchronizedMetaStoreClient.newClient(conf, client);
-        return this;
-    }
+  public IMetaStoreClient build() {
+    return Objects.requireNonNull(client);
+  }
 
-    public IMetaStoreClient build() {
-        return Objects.requireNonNull(client);
+  private static IMetaStoreClient createClient(Configuration conf, boolean allowEmbedded) throws MetaException {
+    Class<? extends IMetaStoreClient> mscClass = MetastoreConf.getClass(
+        conf, MetastoreConf.ConfVars.METASTORE_CLIENT_IMPL,
+        ThriftHiveMetaStoreClient.class, IMetaStoreClient.class);
+    LOG.info("Using {} as a base MetaStoreClient", mscClass.getName());
+
+    IMetaStoreClient baseMetaStoreClient = null;
+    try {
+      baseMetaStoreClient = JavaUtils.newInstance(mscClass,
+          new Class[]{Configuration.class, boolean.class},
+          new Object[]{conf, allowEmbedded});
+    } catch (Throwable t) {
+      // Reflection by JavaUtils will throw RuntimeException, try to get real MetaException here.
+      Throwable rootCause = ExceptionUtils.getRootCause(t);
+      if (rootCause instanceof MetaException) {
+        throw (MetaException) rootCause;
+      } else {
+        throw new MetaException(rootCause.getMessage());
+      }
     }
+
+    return baseMetaStoreClient;
+  }
 }
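
With the reformatted builder in place, decorators compose around whichever base client the configuration selects. Below is a sketch of one possible composition, loosely mirroring the call sites in Hive.java and HiveMetaStoreClient.java above; the factory class, method name, and decorator ordering are assumptions for illustration, not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.client.builder.HiveMetaStoreClientBuilder;

    public final class ClientFactory {
      // Builds a hook-aware, synchronized client on top of the class named by
      // metastore.client.impl (ThriftHiveMetaStoreClient by default).
      public static IMetaStoreClient newSynchronizedClient(
          Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
        return new HiveMetaStoreClientBuilder(conf)
            .newClient(false)       // allowEmbedded = false; reflective instantiation
            .withHooks(hookLoader)  // wraps with HookEnabledMetaStoreClient
            .threadSafe()           // wraps with SynchronizedMetaStoreClient
            .build();
      }
    }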
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 50992936f98..521cbcc2eac 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -1724,6 +1724,10 @@ public enum ConfVars {
                     " and password. Any other value is ignored right now but may be used later."
                 + "If JWT- Supported only in HTTP transport mode. If set, HMS Client will pick the value of JWT from "
                 + "environment variable HMS_JWT and set it in Authorization header in http request"),
+    METASTORE_CLIENT_IMPL("metastore.client.impl",
+        "hive.metastore.client.impl",
+        "org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient",
+        "The name of MetaStoreClient class that implements the IMetaStoreClient interface."),
     METASTORE_CLIENT_ADDITIONAL_HEADERS("metastore.client.http.additional.headers",
         "hive.metastore.client.http.additional.headers", "",
         "Comma separated list of headers which are passed to the metastore service in the http headers"),

