This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new ea27ed751bb HIVE-20189: Separate metastore client code into its own
module (#5924)
ea27ed751bb is described below
commit ea27ed751bb9050e40d37780cb26f0498e52f934
Author: Denys Kuzmenko <[email protected]>
AuthorDate: Tue Jul 15 13:18:20 2025 +0200
HIVE-20189: Separate metastore client code into its own module (#5924)
---
hbase-handler/pom.xml | 5 -
.../org/apache/hive/hcatalog/common/HCatUtil.java | 188 +++----
.../hive/hcatalog/common/HiveClientCache.java | 541 ---------------------
.../hive/hcatalog/common/TestHiveClientCache.java | 269 ----------
.../hcatalog/mapreduce/TestPassProperties.java | 5 +-
iceberg/iceberg-catalog/pom.xml | 2 +-
iceberg/iceberg-handler/pom.xml | 2 +-
iceberg/pom.xml | 2 +-
itests/qtest-druid/pom.xml | 4 +
metastore/pom.xml | 8 +-
.../hadoop/hive/metastore/HiveClientCache.java | 4 +-
.../hadoop/hive/metastore/TestHiveClientCache.java | 140 ++++++
packaging/src/main/assembly/src.xml | 1 +
pom.xml | 12 +
ql/pom.xml | 1 +
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 34 +-
.../HiveMetaStoreClientWithLocalCache.java | 2 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 4 +-
standalone-metastore/metastore-client/pom.xml | 59 +++
.../hadoop/hive/metastore/HiveMetaStoreClient.java | 11 +-
.../hadoop/hive/metastore/IMetaStoreClient.java | 0
.../hive/metastore/RetryingMetaStoreClient.java | 37 +-
.../hadoop/hive/metastore/TableIterable.java | 21 +-
.../hive/metastore/client/BaseMetaStoreClient.java | 1 +
.../client/HookEnabledMetaStoreClient.java | 0
.../metastore/client/MetaStoreClientWrapper.java | 0
.../client/SynchronizedMetaStoreClient.java | 3 +-
.../client/ThriftHiveMetaStoreClient.java | 1 +
.../client/builder/HiveMetaStoreClientBuilder.java | 82 ++++
.../client/utils}/HiveMetaStoreClientUtils.java | 2 +-
.../hadoop/hive/metastore/utils/TableFetcher.java | 0
.../hadoop/hive/metastore/TestTableIterable.java | 4 -
.../hive/metastore/utils/TestTableFetcher.java | 0
.../metastore-rest-catalog/pom.xml | 1 -
standalone-metastore/metastore-server/pom.xml | 8 +-
.../hive/metastore/conf/TestMetastoreConf.java | 3 +-
.../metastore/txn/retry/TestSqlRetryHandler.java | 10 +-
standalone-metastore/metastore-tools/pom.xml | 15 -
.../metastore-tools/tools-common/pom.xml | 8 -
.../hadoop/hive/metastore/tools/HMSClient.java | 3 +-
standalone-metastore/pom.xml | 1 +
41 files changed, 463 insertions(+), 1031 deletions(-)
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index d0a3e79d4b2..c87d78b2941 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -292,11 +292,6 @@
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-server</artifactId>
- <version>${project.version}</version>
- </dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-standalone-metastore-server</artifactId>
diff --git
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index b0b3276893e..af225d95386 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -43,12 +43,12 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveClientCache;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.security.DelegationTokenIdentifier;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
@@ -80,6 +80,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hive.metastore.HiveClientCache.*;
+import static org.apache.hive.hcatalog.common.HCatConstants.*;
+
public class HCatUtil {
private static final Logger LOG = LoggerFactory.getLogger(HCatUtil.class);
@@ -95,12 +98,9 @@ public class HCatUtil {
}
public static boolean checkJobContextIfRunningFromBackend(JobContext j) {
- if (j.getConfiguration().get("pig.job.converted.fetch", "").equals("") &&
- j.getConfiguration().get("mapred.task.id", "").equals("") &&
- !("true".equals(j.getConfiguration().get("pig.illustrating")))) {
- return false;
- }
- return true;
+ return !j.getConfiguration().get("pig.job.converted.fetch", "").isEmpty()
+ || !j.getConfiguration().get("mapred.task.id", "").isEmpty()
+ || "true".equals(j.getConfiguration().get("pig.illustrating"));
}
public static String serialize(Serializable obj) throws IOException {
@@ -119,7 +119,7 @@ public static String serialize(Serializable obj) throws
IOException {
}
public static Object deserialize(String str) throws IOException {
- if (str == null || str.length() == 0) {
+ if (str == null || str.isEmpty()) {
return null;
}
try {
@@ -140,24 +140,12 @@ public static byte[] decodeBytes(String str) {
return Base64.decodeBase64(str.getBytes());
}
- public static List<HCatFieldSchema> getHCatFieldSchemaList(
- FieldSchema... fields) throws HCatException {
- List<HCatFieldSchema> result = new ArrayList<HCatFieldSchema>(
- fields.length);
-
- for (FieldSchema f : fields) {
- result.add(HCatSchemaUtils.getHCatFieldSchema(f));
- }
-
- return result;
- }
-
public static List<HCatFieldSchema> getHCatFieldSchemaList(
List<FieldSchema> fields) throws HCatException {
if (fields == null) {
return null;
} else {
- List<HCatFieldSchema> result = new ArrayList<HCatFieldSchema>();
+ List<HCatFieldSchema> result = new ArrayList<>();
for (FieldSchema f : fields) {
result.add(HCatSchemaUtils.getHCatFieldSchema(f));
}
@@ -178,7 +166,7 @@ public static List<FieldSchema> getFieldSchemaList(
if (hcatFields == null) {
return null;
} else {
- List<FieldSchema> result = new ArrayList<FieldSchema>();
+ List<FieldSchema> result = new ArrayList<>();
for (HCatFieldSchema f : hcatFields) {
result.add(HCatSchemaUtils.getFieldSchema(f));
}
@@ -187,14 +175,14 @@ public static List<FieldSchema> getFieldSchemaList(
}
public static Table getTable(IMetaStoreClient client, String dbName, String
tableName)
- throws NoSuchObjectException, TException, MetaException {
+ throws TException {
return new Table(client.getTable(dbName, tableName));
}
public static HCatSchema getTableSchemaWithPtnCols(Table table) throws
IOException {
HCatSchema tableSchema = new
HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
- if (table.getPartitionKeys().size() != 0) {
+ if (!table.getPartitionKeys().isEmpty()) {
// add partition keys to table schema
// NOTE : this assumes that we do not ever have ptn keys as columns
@@ -214,8 +202,8 @@ public static HCatSchema getTableSchemaWithPtnCols(Table
table) throws IOExcepti
* @throws IOException
*/
public static HCatSchema getPartitionColumns(Table table) throws IOException
{
- HCatSchema cols = new HCatSchema(new LinkedList<HCatFieldSchema>());
- if (table.getPartitionKeys().size() != 0) {
+ HCatSchema cols = new HCatSchema(new LinkedList<>());
+ if (!table.getPartitionKeys().isEmpty()) {
for (FieldSchema fs : table.getPartitionKeys()) {
cols.append(HCatSchemaUtils.getHCatFieldSchema(fs));
}
@@ -233,16 +221,16 @@ public static HCatSchema getPartitionColumns(Table table)
throws IOException {
* @return the list of newly added fields
* @throws IOException Signals that an I/O exception has occurred.
*/
- public static List<FieldSchema> validatePartitionSchema(Table table,
- HCatSchema partitionSchema) throws IOException {
- Map<String, FieldSchema> partitionKeyMap = new HashMap<String,
FieldSchema>();
+ public static List<FieldSchema> validatePartitionSchema(Table table,
HCatSchema partitionSchema)
+ throws IOException {
+ Map<String, FieldSchema> partitionKeyMap = new HashMap<>();
for (FieldSchema field : table.getPartitionKeys()) {
partitionKeyMap.put(field.getName().toLowerCase(), field);
}
List<FieldSchema> tableCols = table.getCols();
- List<FieldSchema> newFields = new ArrayList<FieldSchema>();
+ List<FieldSchema> newFields = new ArrayList<>();
for (int i = 0; i < partitionSchema.getFields().size(); i++) {
@@ -314,15 +302,15 @@ public static boolean validateMorePermissive(FsAction
first, FsAction second) {
|| (first == second)) {
return true;
}
- switch (first) {
- case READ_EXECUTE:
- return ((second == FsAction.READ) || (second == FsAction.EXECUTE));
- case READ_WRITE:
- return ((second == FsAction.READ) || (second == FsAction.WRITE));
- case WRITE_EXECUTE:
- return ((second == FsAction.WRITE) || (second == FsAction.EXECUTE));
- }
- return false;
+ return switch (first) {
+ case READ_EXECUTE ->
+ ((second == FsAction.READ) || (second == FsAction.EXECUTE));
+ case READ_WRITE ->
+ ((second == FsAction.READ) || (second == FsAction.WRITE));
+ case WRITE_EXECUTE ->
+ ((second == FsAction.WRITE) || (second == FsAction.EXECUTE));
+ default -> false;
+ };
}
/**
@@ -335,33 +323,22 @@ public static boolean validateMorePermissive(FsAction
first, FsAction second) {
* by execute permissions
*/
public static boolean validateExecuteBitPresentIfReadOrWrite(FsAction perms)
{
- if ((perms == FsAction.READ) || (perms == FsAction.WRITE)
- || (perms == FsAction.READ_WRITE)) {
- return false;
- }
- return true;
+ return (perms != FsAction.READ) && (perms != FsAction.WRITE)
+ && (perms != FsAction.READ_WRITE);
}
public static
Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier>
getJobTrackerDelegationToken(
- Configuration conf, String userName) throws Exception {
- // LOG.info("getJobTrackerDelegationToken("+conf+","+userName+")");
- JobClient jcl = new JobClient(new JobConf(conf, HCatOutputFormat.class));
-
Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier>
t = jcl
- .getDelegationToken(new Text(userName));
- // LOG.info("got "+t);
- return t;
-
- // return null;
+ Configuration conf, String userName) throws Exception {
+ try (JobClient jcl = new JobClient(new JobConf(conf,
HCatOutputFormat.class))) {
+ return jcl.getDelegationToken(new Text(userName));
+ }
}
public static Token<? extends AbstractDelegationTokenIdentifier>
extractThriftToken(
- String tokenStrForm, String tokenSignature) throws MetaException,
- TException, IOException {
- // LOG.info("extractThriftToken("+tokenStrForm+","+tokenSignature+")");
+ String tokenStrForm, String tokenSignature) throws IOException {
Token<? extends AbstractDelegationTokenIdentifier> t = new
Token<DelegationTokenIdentifier>();
t.decodeFromUrlString(tokenStrForm);
t.setService(new Text(tokenSignature));
- // LOG.info("returning "+t);
return t;
}
@@ -404,11 +381,8 @@ public static HiveStorageHandler
getStorageHandler(Configuration conf, PartInfo
* @throws IOException
*/
public static HiveStorageHandler getStorageHandler(Configuration conf,
- String storageHandler,
- String serDe,
- String inputFormat,
- String outputFormat)
- throws IOException {
+ String storageHandler, String serDe, String inputFormat, String
outputFormat)
+ throws IOException {
if ((storageHandler == null) ||
(storageHandler.equals(FosterStorageHandler.class.getName()))) {
try {
@@ -426,8 +400,7 @@ public static HiveStorageHandler
getStorageHandler(Configuration conf,
Class<? extends HiveStorageHandler> handlerClass =
(Class<? extends HiveStorageHandler>) Class
.forName(storageHandler, true,
Utilities.getSessionSpecifiedClassLoader());
- return (HiveStorageHandler) ReflectionUtils.newInstance(
- handlerClass, conf);
+ return ReflectionUtils.newInstance(handlerClass, conf);
} catch (ClassNotFoundException e) {
throw new IOException("Error in loading storage handler."
+ e.getMessage(), e);
@@ -437,9 +410,9 @@ public static HiveStorageHandler
getStorageHandler(Configuration conf,
public static Pair<String, String> getDbAndTableName(String tableName)
throws IOException {
String[] dbTableNametokens = tableName.split("\\.");
if (dbTableNametokens.length == 1) {
- return new Pair<String, String>(Warehouse.DEFAULT_DATABASE_NAME,
tableName);
+ return new Pair<>(Warehouse.DEFAULT_DATABASE_NAME, tableName);
} else if (dbTableNametokens.length == 2) {
- return new Pair<String, String>(dbTableNametokens[0],
dbTableNametokens[1]);
+ return new Pair<>(dbTableNametokens[0], dbTableNametokens[1]);
} else {
throw new IOException("tableName expected in the form "
+ "<databasename>.<table name> or <table name>. Got " + tableName);
@@ -454,18 +427,18 @@ public static Pair<String, String>
getDbAndTableName(String tableName) throws IO
TableDesc tableDesc = new TableDesc(storageHandler.getInputFormatClass(),
storageHandler.getOutputFormatClass(),props);
if (tableDesc.getJobProperties() == null) {
- tableDesc.setJobProperties(new HashMap<String, String>());
+ tableDesc.setJobProperties(new HashMap<>());
}
Properties mytableProperties = tableDesc.getProperties();
mytableProperties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,inputJobInfo.getDatabaseName()+
"." + inputJobInfo.getTableName());
- Map<String, String> jobProperties = new HashMap<String, String>();
+ Map<String, String> jobProperties = new HashMap<>();
try {
Map<String, String> properties = tableDesc.getJobProperties();
LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>)
HCatUtil.deserialize(
- properties.get(HCatConstants.HCAT_KEY_JOB_INFO));
+ properties.get(HCatConstants.HCAT_KEY_JOB_INFO));
if (inputJobInfos == null) {
inputJobInfos = new LinkedList<>();
}
@@ -493,9 +466,9 @@ public static Pair<String, String> getDbAndTableName(String
tableName) throws IO
Properties props =
outputJobInfo.getTableInfo().getStorerInfo().getProperties();
props.put(serdeConstants.SERIALIZATION_LIB,storageHandler.getSerDeClass().getName());
TableDesc tableDesc = new TableDesc(storageHandler.getInputFormatClass(),
- IgnoreKeyTextOutputFormat.class,props);
+ IgnoreKeyTextOutputFormat.class,props);
if (tableDesc.getJobProperties() == null)
- tableDesc.setJobProperties(new HashMap<String, String>());
+ tableDesc.setJobProperties(new HashMap<>());
for (Map.Entry<String, String> el : conf) {
tableDesc.getJobProperties().put(el.getKey(), el.getValue());
}
@@ -505,7 +478,7 @@ public static Pair<String, String> getDbAndTableName(String
tableName) throws IO
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,
outputJobInfo.getDatabaseName()+ "." + outputJobInfo.getTableName());
- Map<String, String> jobProperties = new HashMap<String, String>();
+ Map<String, String> jobProperties = new HashMap<>();
try {
tableDesc.getJobProperties().put(
HCatConstants.HCAT_KEY_OUTPUT_INFO,
@@ -566,7 +539,15 @@ public static IMetaStoreClient
getHiveMetastoreClient(HiveConf hiveConf)
if (hiveClientCache == null) {
synchronized (IMetaStoreClient.class) {
if (hiveClientCache == null) {
- hiveClientCache = new HiveClientCache(hiveConf);
+ hiveClientCache = new HiveClientCache(
+ hiveConf.getInt(
+ HCAT_HIVE_CLIENT_EXPIRY_TIME,
DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS),
+ hiveConf.getInt(
+ HCAT_HIVE_CLIENT_CACHE_INITIAL_CAPACITY,
DEFAULT_HIVE_CACHE_INITIAL_CAPACITY),
+ hiveConf.getInt(
+ HCAT_HIVE_CLIENT_CACHE_MAX_CAPACITY,
DEFAULT_HIVE_CACHE_MAX_CAPACITY),
+ hiveConf.getBoolean(
+ HCAT_HIVE_CLIENT_CACHE_STATS_ENABLED,
DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED));
}
}
}
@@ -585,10 +566,9 @@ public static IMetaStoreClient
getHiveMetastoreClient(HiveConf hiveConf)
* @param hiveConf The hive configuration
* @return the client
* @throws MetaException When HiveMetaStoreClient couldn't be created
- * @throws IOException
*/
@Deprecated
- public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf) throws
MetaException, IOException {
+ public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf) throws
MetaException {
LOG.warn("HCatUtil.getHiveClient is unsafe and can be a resource leak
depending on HMSC "
+ "implementation and caching mechanism. Use
HCatUtil.getHiveMetastoreClient instead.");
@@ -639,9 +619,7 @@ private static Properties
getHiveSiteOverrides(Configuration hiveSite, Configura
}
}
}
-
LOG.info("Configuration differences=" + difference);
-
return difference;
}
@@ -649,7 +627,6 @@ public static HiveConf getHiveConf(Configuration conf)
throws IOException {
HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);
-
//copy the hive conf into the job conf and restore it
//in the backend context
if (StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
@@ -657,7 +634,6 @@ public static HiveConf getHiveConf(Configuration conf)
LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " not set. Generating
configuration differences.");
Properties differences = getHiveSiteOverrides(conf);
-
// Must set this key even if differences is empty otherwise client and
AM will attempt
// to set this multiple times.
conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
HCatUtil.serialize(differences));
@@ -679,24 +655,23 @@ public static HiveConf getHiveConf(Configuration conf)
return hiveConf;
}
- public static HiveConf storePropertiesToHiveConf(Properties properties,
HiveConf hiveConf)
- throws IOException {
- for (Map.Entry<Object, Object> prop : properties.entrySet()) {
- if (prop.getValue() instanceof String) {
- hiveConf.set((String) prop.getKey(), (String) prop.getValue());
- } else if (prop.getValue() instanceof Integer) {
- hiveConf.setInt((String) prop.getKey(), (Integer) prop.getValue());
- } else if (prop.getValue() instanceof Boolean) {
- hiveConf.setBoolean((String) prop.getKey(), (Boolean) prop.getValue());
- } else if (prop.getValue() instanceof Long) {
- hiveConf.setLong((String) prop.getKey(), (Long) prop.getValue());
- } else if (prop.getValue() instanceof Float) {
- hiveConf.setFloat((String) prop.getKey(), (Float) prop.getValue());
- } else {
- LOG.warn("Unsupported type: key=" + prop.getKey() + " value=" +
prop.getValue());
+ public static void storePropertiesToHiveConf(Properties properties, HiveConf
hiveConf) {
+ properties.forEach((key, value) -> {
+ switch (value) {
+ case String s ->
+ hiveConf.set((String) key, s);
+ case Integer i ->
+ hiveConf.setInt((String) key, i);
+ case Boolean b ->
+ hiveConf.setBoolean((String) key, b);
+ case Long l ->
+ hiveConf.setLong((String) key, l);
+ case Float v ->
+ hiveConf.setFloat((String) key, v);
+ case null, default ->
+ LOG.warn("Unsupported type: key=" + key + " value=" + value);
}
- }
- return hiveConf;
+ });
}
public static JobConf getJobConfFromContext(JobContext jobContext) {
@@ -714,19 +689,15 @@ public static JobConf getJobConfFromContext(JobContext
jobContext) {
// Retrieve settings in HiveConf that aren't also set in the JobConf.
public static Map<String,String> getHCatKeyHiveConf(JobConf conf) {
try {
- Properties properties = null;
-
- if (! StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
+ Properties properties;
+ if (!StringUtils.isBlank(conf.get(HCatConstants.HCAT_KEY_HIVE_CONF))) {
properties = (Properties) HCatUtil.deserialize(
conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
-
LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " is set. Using
differences=" + properties);
} else {
LOG.info(HCatConstants.HCAT_KEY_HIVE_CONF + " not set. Generating
configuration differences.");
-
properties = getHiveSiteOverrides(conf);
}
-
// This method may not be safe as it can throw an NPE if a key or value
is null.
return Maps.fromProperties(properties);
}
@@ -744,9 +715,8 @@ public static void copyJobPropertiesToJobConf(
public static boolean isHadoop23() {
String version = org.apache.hadoop.util.VersionInfo.getVersion();
- if
(version.matches("\\b0\\.23\\..+\\b")||version.matches("\\b2\\..*")||version.matches("\\b3\\..*"))
- return true;
- return false;
+ return version.matches("\\b0\\.23\\..+\\b")
+ || version.matches("\\b2\\..*") || version.matches("\\b3\\..*");
}
/**
* Used by various tests to make sure the path is safe for Windows
@@ -767,7 +737,7 @@ public static void putInputJobInfoToConf(InputJobInfo
inputJobInfo, Configuratio
throws IOException {
LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>)
HCatUtil.deserialize(
- conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
+ conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
if (inputJobInfos == null) {
inputJobInfos = new LinkedList<>();
@@ -777,14 +747,14 @@ public static void putInputJobInfoToConf(InputJobInfo
inputJobInfo, Configuratio
}
public static LinkedList<InputJobInfo>
getInputJobInfosFromConf(Configuration conf)
- throws IOException {
+ throws IOException {
LinkedList<InputJobInfo> inputJobInfos = (LinkedList<InputJobInfo>)
HCatUtil.deserialize(
- conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
+ conf.get(HCatConstants.HCAT_KEY_JOB_INFO));
return inputJobInfos;
}
public static InputJobInfo getLastInputJobInfosFromConf(Configuration conf)
- throws IOException {
+ throws IOException {
LinkedList<InputJobInfo> inputJobInfos = getInputJobInfosFromConf(conf);
if (inputJobInfos == null || inputJobInfos.isEmpty()) {
return null;
diff --git
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
deleted file mode 100644
index 11e53d94a48..00000000000
---
a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.common;
-
-import java.io.IOException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.security.auth.login.LoginException;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
-import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hive.common.util.ShutdownHookManager;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * A thread safe time expired cache for HiveMetaStoreClient
- */
-class HiveClientCache {
- public final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2 * 60;
- public final static int DEFAULT_HIVE_CACHE_INITIAL_CAPACITY = 50;
- public final static int DEFAULT_HIVE_CACHE_MAX_CAPACITY = 50;
- public final static boolean DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED = false;
-
- private final Cache<HiveClientCacheKey, ICacheableMetaStoreClient> hiveCache;
- private static final Logger LOG =
LoggerFactory.getLogger(HiveClientCache.class);
- private final int timeout;
- // This lock is used to make sure removalListener won't close a client that
is being contemplated for returning by get()
- private final Object CACHE_TEARDOWN_LOCK = new Object();
-
- private static final AtomicInteger nextId = new AtomicInteger(0);
-
- private final ScheduledFuture<?> cleanupHandle; // used to cleanup cache
-
- private boolean enableStats;
-
- // Since HiveMetaStoreClient is not threadsafe, hive clients are not shared
across threads.
- // Thread local variable containing each thread's unique ID, is used as one
of the keys for the cache
- // causing each thread to get a different client even if the conf is same.
- private static final ThreadLocal<Integer> threadId =
- new ThreadLocal<Integer>() {
- @Override
- protected Integer initialValue() {
- return nextId.getAndIncrement();
- }
- };
-
- private int getThreadId() {
- return threadId.get();
- }
-
- public static IMetaStoreClient getNonCachedHiveMetastoreClient(HiveConf
hiveConf) throws MetaException {
- return RetryingMetaStoreClient.getProxy(hiveConf, true);
- }
-
- public HiveClientCache(HiveConf hiveConf) {
- this(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS),
- hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_INITIAL_CAPACITY,
DEFAULT_HIVE_CACHE_INITIAL_CAPACITY),
- hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_CACHE_MAX_CAPACITY,
DEFAULT_HIVE_CACHE_MAX_CAPACITY),
-
hiveConf.getBoolean(HCatConstants.HCAT_HIVE_CLIENT_CACHE_STATS_ENABLED,
DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED));
-
- }
-
- /**
- * @deprecated This constructor will be made private or removed as more
configuration properties are required.
- */
- @Deprecated
- public HiveClientCache(final int timeout) {
- this(timeout, DEFAULT_HIVE_CACHE_INITIAL_CAPACITY,
DEFAULT_HIVE_CACHE_MAX_CAPACITY, DEFAULT_HIVE_CLIENT_CACHE_STATS_ENABLED);
- }
-
- /**
- * @param timeout the length of time in seconds after a client is created
that it should be automatically removed
- */
- private HiveClientCache(final int timeout, final int initialCapacity, final
int maxCapacity, final boolean enableStats) {
- this.timeout = timeout;
- this.enableStats = enableStats;
-
- LOG.info("Initializing cache: eviction-timeout=" + timeout + "
initial-capacity=" + initialCapacity + " maximum-capacity=" + maxCapacity);
-
- CacheBuilder builder = CacheBuilder.newBuilder()
- .initialCapacity(initialCapacity)
- .maximumSize(maxCapacity)
- .expireAfterAccess(timeout, TimeUnit.SECONDS)
- .removalListener(createRemovalListener());
-
- /*
- * Guava versions <12.0 have stats collection enabled by default and do
not expose a recordStats method.
- * Check for newer versions of the library and ensure that stats
collection is enabled by default.
- */
- try {
- java.lang.reflect.Method m = builder.getClass().getMethod("recordStats",
null);
- m.invoke(builder, null);
- } catch (NoSuchMethodException e) {
- LOG.debug("Using a version of guava <12.0. Stats collection is enabled
by default.");
- } catch (Exception e) {
- LOG.warn("Unable to invoke recordStats method.", e);
- }
-
- this.hiveCache = builder.build();
-
- /*
- * We need to use a cleanup interval, which is how often the cleanup
thread will kick in
- * and go do a check to see if any of the connections can be expired. We
don't want to
- * do this too often, because it'd be like having a mini-GC going off
every so often,
- * so we limit it to a minimum of DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS.
If the client
- * has explicitly set a larger timeout on the cache, though, we respect
that, and use that
- */
- long cleanupInterval = timeout > DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS ?
timeout : DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS;
-
- this.cleanupHandle = createCleanupThread(cleanupInterval);
-
- createShutdownHook();
- }
-
- private RemovalListener<HiveClientCacheKey, ICacheableMetaStoreClient>
createRemovalListener() {
- RemovalListener<HiveClientCacheKey, ICacheableMetaStoreClient> listener =
- new RemovalListener<HiveClientCacheKey, ICacheableMetaStoreClient>() {
- @Override
- public void onRemoval(RemovalNotification<HiveClientCacheKey,
ICacheableMetaStoreClient> notification) {
- ICacheableMetaStoreClient hiveMetaStoreClient =
notification.getValue();
- if (hiveMetaStoreClient != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Evicting client: " +
Integer.toHexString(System.identityHashCode(hiveMetaStoreClient)));
- }
-
- // TODO: This global lock may not be necessary as all concurrent
methods in ICacheableMetaStoreClient
- // are synchronized.
- synchronized (CACHE_TEARDOWN_LOCK) {
- hiveMetaStoreClient.setExpiredFromCache();
- hiveMetaStoreClient.tearDownIfUnused();
- }
- }
- }
- };
-
- return listener;
- }
-
- private ScheduledFuture<?> createCleanupThread(long interval) {
- // Add a maintenance thread that will attempt to trigger a cache clean
continuously
- Runnable cleanupThread = new Runnable() {
- @Override
- public void run() {
- cleanup();
- }
- };
-
- /**
- * Create the cleanup handle. In addition to cleaning up every
cleanupInterval, we add
- * a slight offset, so that the very first time it runs, it runs with a
slight delay, so
- * as to catch any other connections that were closed when the first
timeout happened.
- * As a result, the time we can expect an unused connection to be reaped is
- * 5 seconds after the first timeout, and then after that, it'll check for
whether or not
- * it can be cleaned every
max(DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS,timeout) seconds
- */
- ThreadFactory daemonThreadFactory = (new
ThreadFactoryBuilder()).setDaemon(true)
- .setNameFormat("HiveClientCache-cleaner-%d")
- .build();
-
- return Executors.newScheduledThreadPool(1, daemonThreadFactory)
- .scheduleWithFixedDelay(cleanupThread, timeout + 5, interval,
TimeUnit.SECONDS);
- }
-
- private void createShutdownHook() {
- // Add a shutdown hook for cleanup, if there are elements remaining in the
cache which were not cleaned up.
- // This is the best effort approach. Ignore any error while doing so.
Notice that most of the clients
- // would get cleaned up via either the removalListener or the close()
call, only the active clients
- // that are in the cache or expired but being used in other threads wont
get cleaned. The following code will only
- // clean the active cache ones. The ones expired from cache but being hold
by other threads are in the mercy
- // of finalize() being called.
- Thread cleanupHiveClientShutdownThread = new Thread() {
- @Override
- public void run() {
- LOG.debug("Cleaning up hive client cache in ShutDown hook");
- cleanupHandle.cancel(false); // Cancel the maintenance thread.
- closeAllClientsQuietly();
- }
- };
-
- ShutdownHookManager.addShutdownHook(cleanupHiveClientShutdownThread);
- }
-
- /**
- * Note: This doesn't check if they are being used or not, meant only to be
called during shutdown etc.
- */
- void closeAllClientsQuietly() {
- try {
- ConcurrentMap<HiveClientCacheKey, ICacheableMetaStoreClient> elements =
hiveCache.asMap();
- for (ICacheableMetaStoreClient cacheableHiveMetaStoreClient :
elements.values()) {
- cacheableHiveMetaStoreClient.tearDown();
- }
- } catch (Exception e) {
- LOG.warn("Clean up of hive clients in the cache failed. Ignored", e);
- }
-
- if (this.enableStats) {
- LOG.info("Cache statistics after shutdown: size=" + hiveCache.size() + "
" + hiveCache.stats());
- }
- }
-
- public void cleanup() {
- // TODO: periodically reload a new HiveConf to check if stats reporting is
enabled.
- hiveCache.cleanUp();
-
- if (enableStats) {
- LOG.info("Cache statistics after cleanup: size=" + hiveCache.size() + "
" + hiveCache.stats());
- }
- }
-
- /**
- * Returns a cached client if exists or else creates one, caches and returns
it. It also checks that the client is
- * healthy and can be reused
- * @param hiveConf
- * @return the hive client
- * @throws MetaException
- * @throws IOException
- * @throws LoginException
- */
- public IMetaStoreClient get(final HiveConf hiveConf) throws MetaException,
IOException, LoginException {
- final HiveClientCacheKey cacheKey =
HiveClientCacheKey.fromHiveConf(hiveConf, getThreadId());
- ICacheableMetaStoreClient cacheableHiveMetaStoreClient = null;
-
- // the hmsc is not shared across threads. So the only way it could get
closed while we are doing healthcheck
- // is if removalListener closes it. The synchronization takes care that
removalListener won't do it
- synchronized (CACHE_TEARDOWN_LOCK) {
- cacheableHiveMetaStoreClient = getOrCreate(cacheKey);
- cacheableHiveMetaStoreClient.acquire();
- }
- if (!cacheableHiveMetaStoreClient.isOpen()) {
- synchronized (CACHE_TEARDOWN_LOCK) {
- hiveCache.invalidate(cacheKey);
- cacheableHiveMetaStoreClient.close();
- cacheableHiveMetaStoreClient = getOrCreate(cacheKey);
- cacheableHiveMetaStoreClient.acquire();
- }
- }
- return cacheableHiveMetaStoreClient;
- }
-
- /**
- * Return from cache if exists else create/cache and return
- * @param cacheKey
- * @return
- * @throws IOException
- * @throws MetaException
- * @throws LoginException
- */
- private ICacheableMetaStoreClient getOrCreate(final HiveClientCacheKey
cacheKey)
- throws IOException, MetaException, LoginException {
- try {
- return hiveCache.get(cacheKey, new Callable<ICacheableMetaStoreClient>()
{
- @Override
- public ICacheableMetaStoreClient call() throws MetaException {
- // This is called from HCat, so always allow embedded metastore (as
was the default).
- return
- (ICacheableMetaStoreClient)
RetryingMetaStoreClient.getProxy(cacheKey.getHiveConf(),
- new Class<?>[]{HiveConf.class, Integer.class, Boolean.class},
- new Object[]{cacheKey.getHiveConf(), timeout, true},
- CacheableHiveMetaStoreClient.class.getName());
- }
- });
- } catch (ExecutionException e) {
- Throwable t = e.getCause();
- if (t instanceof IOException) {
- throw (IOException) t;
- } else if (t instanceof MetaException) {
- throw (MetaException) t;
- } else if (t instanceof LoginException) {
- throw (LoginException) t;
- } else {
- throw new IOException("Error creating hiveMetaStoreClient", t);
- }
- }
- }
-
- /**
- * A class to wrap HiveConf and expose equality based only on
UserGroupInformation and the metaStoreURIs.
- * This becomes the key for the cache and this way the same
HiveMetaStoreClient would be returned if
- * UserGroupInformation and metaStoreURIs are same. This function can evolve
to express
- * the cases when HiveConf is different but the same hiveMetaStoreClient can
be used
- */
- static class HiveClientCacheKey {
- final private String metaStoreURIs;
- final private UserGroupInformation ugi;
- final private HiveConf hiveConf;
- final private int threadId;
-
- private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws
IOException, LoginException {
- this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS);
- ugi = Utils.getUGI();
- this.hiveConf = hiveConf;
- this.threadId = threadId;
- }
-
- public static HiveClientCacheKey fromHiveConf(HiveConf hiveConf, final int
threadId) throws IOException, LoginException {
- return new HiveClientCacheKey(hiveConf, threadId);
- }
-
- public HiveConf getHiveConf() {
- return hiveConf;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- HiveClientCacheKey that = (HiveClientCacheKey) o;
- return new EqualsBuilder().
- append(this.metaStoreURIs,
- that.metaStoreURIs).
- append(this.ugi, that.ugi).
- append(this.threadId, that.threadId).isEquals();
- }
-
- @Override
- public int hashCode() {
- return new HashCodeBuilder().
- append(metaStoreURIs).
- append(ugi).
- append(threadId).toHashCode();
- }
-
- @Override
- public String toString() {
- return "HiveClientCacheKey: uri=" + this.metaStoreURIs + " ugi=" +
this.ugi + " thread=" + this.threadId;
- }
- }
-
- @InterfaceAudience.Private
- public interface ICacheableMetaStoreClient extends IMetaStoreClient {
- @NoReconnect
- void acquire();
-
- @NoReconnect
- void setExpiredFromCache();
-
- @NoReconnect
- AtomicInteger getUsers();
-
- @NoReconnect
- boolean isClosed();
-
- /**
- * @deprecated This method is not used internally and should not be
visible through HCatClient.create.
- */
- @Deprecated
- @NoReconnect
- boolean isOpen();
-
- @NoReconnect
- void tearDownIfUnused();
-
- @NoReconnect
- void tearDown();
- }
-
- /**
- * Add # of current users on HiveMetaStoreClient, so that the client can be
cleaned when no one is using it.
- */
- static class CacheableHiveMetaStoreClient extends HiveMetaStoreClient
implements ICacheableMetaStoreClient {
-
- private final AtomicInteger users = new AtomicInteger(0);
- private volatile boolean expiredFromCache = false;
- private boolean isClosed = false;
-
- CacheableHiveMetaStoreClient(final HiveConf conf, final Integer timeout,
Boolean allowEmbedded)
- throws MetaException {
- super(conf, null, allowEmbedded);
- }
-
- /**
- * Increments the user count and optionally renews the expiration time.
- * <code>renew</code> should correspond with the expiration policy of the
cache.
- * When the policy is <code>expireAfterAccess</code>, the expiration time
should be extended.
- * When the policy is <code>expireAfterWrite</code>, the expiration time
should not be extended.
- * A mismatch with the policy will lead to closing the connection
unnecessarily after the initial
- * expiration time is generated.
- * @param renew whether the expiration time should be extended.
- */
- public synchronized void acquire() {
- users.incrementAndGet();
- if (users.get() > 1) {
- LOG.warn("Unexpected increment of user count beyond one: " +
users.get() + " " + this);
- }
- }
-
- /**
- * Decrements the user count.
- */
- private void release() {
- if (users.get() > 0) {
- users.decrementAndGet();
- } else {
- LOG.warn("Unexpected attempt to decrement user count of zero: " +
users.get() + " " + this);
- }
- }
-
- /**
- * Communicate to the client that it is no longer in the cache.
- * The expiration time should be voided to allow the connection to be
closed at the first opportunity.
- */
- public synchronized void setExpiredFromCache() {
- if (users.get() != 0) {
- LOG.warn("Evicted client has non-zero user count: " + users.get());
- }
-
- expiredFromCache = true;
- }
-
- public boolean isClosed() {
- return isClosed;
- }
-
- /*
- * Used only for Debugging or testing purposes
- */
- public AtomicInteger getUsers() {
- return users;
- }
-
- /**
- * Make a call to hive meta store and see if the client is still usable.
Some calls where the user provides
- * invalid data renders the client unusable for future use (example:
create a table with very long table name)
- * @return
- */
- @Deprecated
- public boolean isOpen() {
- try {
- // Look for an unlikely database name and see if either MetaException
or TException is thrown
- super.getDatabases("NonExistentDatabaseUsedForHealthCheck");
- } catch (TException e) {
- return false;
- }
- return true;
- }
-
- /**
- * Decrement the user count and piggyback this to set expiry flag as well,
then teardown(), if conditions are met.
- * This *MUST* be called by anyone who uses this client.
- */
- @Override
- public synchronized void close() {
- release();
- tearDownIfUnused();
- }
-
- /**
- * Attempt to tear down the client connection.
- * The connection will be closed if the following conditions hold:
- * 1. There are no active user holding the client.
- * 2. The client has been evicted from the cache.
- */
- public synchronized void tearDownIfUnused() {
- if (users.get() != 0) {
- LOG.warn("Non-zero user count preventing client tear down: users=" +
users.get() + " expired=" + expiredFromCache);
- }
-
- if (users.get() == 0 && expiredFromCache) {
- this.tearDown();
- }
- }
-
- /**
- * Close the underlying objects irrespective of whether they are in use or
not.
- */
- public void tearDown() {
- try {
- if (!isClosed) {
- super.close();
- }
- isClosed = true;
- } catch (Exception e) {
- LOG.warn("Error closing hive metastore client. Ignored.", e);
- }
- }
-
- @Override
- public String toString() {
- return "HCatClient: thread: " + Thread.currentThread().getId() + "
users=" + users.get()
- + " expired=" + expiredFromCache + " closed=" + isClosed;
- }
-
- /**
- * GC is attempting to destroy the object.
- * No one references this client anymore, so it can be torn down without
worrying about user counts.
- * @throws Throwable
- */
- @Override
- protected void finalize() throws Throwable {
- if (users.get() != 0) {
- LOG.warn("Closing client with non-zero user count: users=" +
users.get() + " expired=" + expiredFromCache);
- }
-
- try {
- this.tearDown();
- } finally {
- super.finalize();
- }
- }
- }
-}
diff --git
a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
deleted file mode 100644
index a5cb79cd9b3..00000000000
---
a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hive.hcatalog.common;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.util.ExitUtil;
-import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
-import org.apache.thrift.TException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.login.LoginException;
-import java.io.IOException;
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-public class TestHiveClientCache {
-
- private static final Logger LOG =
LoggerFactory.getLogger(TestHiveClientCache.class);
- final HiveConf hiveConf = new HiveConf();
-
- @BeforeClass
- public static void setUp() throws Exception {
- }
-
- @AfterClass
- public static void tearDown() throws Exception {
- }
-
- @Test
- public void testCacheHit() throws IOException, MetaException, LoginException
{
- HiveClientCache cache = new HiveClientCache(1000);
- HiveClientCache.ICacheableMetaStoreClient client =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- assertNotNull(client);
- client.close(); // close shouldn't matter
-
- // Setting a non important configuration should return the same client only
- hiveConf.setIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS, 10);
- HiveClientCache.ICacheableMetaStoreClient client2 =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- assertNotNull(client2);
- assertSame(client, client2);
- assertEquals(client.getUsers(), client2.getUsers());
- client2.close();
- }
-
- @Test
- public void testCacheMiss() throws IOException, MetaException,
LoginException {
- HiveClientCache cache = new HiveClientCache(1000);
- IMetaStoreClient client = cache.get(hiveConf);
- assertNotNull(client);
-
- // Set different uri as it is one of the criteria deciding whether to
return the same client or not
- hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are
checked for string equivalence, even spaces make them different
- IMetaStoreClient client2 = cache.get(hiveConf);
- assertNotNull(client2);
- assertNotSame(client, client2);
- }
-
- /**
- * Check that a new client is returned for the same configuration after the
expiry time.
- * Also verify that the expiry time configuration is honoured
- */
- @Test
- public void testCacheExpiry() throws IOException, MetaException,
LoginException, InterruptedException {
- HiveClientCache cache = new HiveClientCache(1);
- HiveClientCache.ICacheableMetaStoreClient client =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- assertNotNull(client);
-
- Thread.sleep(2500);
- HiveClientCache.ICacheableMetaStoreClient client2 =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- client.close();
- assertTrue(client.isClosed()); // close() after *expiry time* and *a cache
access* should have tore down the client
-
- assertNotNull(client2);
- assertNotSame(client, client2);
- }
-
- /**
- * Check that a *new* client is created if asked from different threads even
with
- * the same hive configuration
- * @throws ExecutionException
- * @throws InterruptedException
- */
- @Test
- public void testMultipleThreadAccess() throws ExecutionException,
InterruptedException {
- final HiveClientCache cache = new HiveClientCache(1000);
-
- class GetHiveClient implements Callable<IMetaStoreClient> {
- @Override
- public IMetaStoreClient call() throws IOException, MetaException,
LoginException {
- return cache.get(hiveConf);
- }
- }
-
- ExecutorService executor = Executors.newFixedThreadPool(2);
-
- Callable<IMetaStoreClient> worker1 = new GetHiveClient();
- Callable<IMetaStoreClient> worker2 = new GetHiveClient();
- Future<IMetaStoreClient> clientFuture1 = executor.submit(worker1);
- Future<IMetaStoreClient> clientFuture2 = executor.submit(worker2);
- IMetaStoreClient client1 = clientFuture1.get();
- IMetaStoreClient client2 = clientFuture2.get();
- assertNotNull(client1);
- assertNotNull(client2);
- assertNotSame(client1, client2);
- }
-
- @Test
- public void testCloseAllClients() throws IOException, MetaException,
LoginException {
- final HiveClientCache cache = new HiveClientCache(1000);
- HiveClientCache.ICacheableMetaStoreClient client1 =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are
checked for string equivalence, even spaces make them different
- HiveClientCache.ICacheableMetaStoreClient client2 =
(HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
- cache.closeAllClientsQuietly();
- assertTrue(client1.isClosed());
- assertTrue(client2.isClosed());
- }
-
- /**
- * Test that a long table name actually breaks the HMSC. Subsequently check
that isOpen() reflects
- * and tells if the client is broken
- */
- @Ignore("hangs indefinitely")
- @Test
- public void testHMSCBreakability() throws IOException, MetaException,
LoginException, TException, AlreadyExistsException,
- InvalidObjectException, NoSuchObjectException, InterruptedException {
- // Setup
- LocalMetaServer metaServer = new LocalMetaServer();
- metaServer.start();
-
- final HiveClientCache cache = new HiveClientCache(1000);
- HiveClientCache.CacheableHiveMetaStoreClient client =
- (HiveClientCache.CacheableHiveMetaStoreClient)
cache.get(metaServer.getHiveConf());
-
- assertTrue(client.isOpen());
-
- final String DB_NAME = "test_db";
- final String LONG_TABLE_NAME = "long_table_name_" + new BigInteger(200,
new Random()).toString(2);
-
- try {
- client.dropTable(DB_NAME, LONG_TABLE_NAME);
- } catch (Exception e) {
- }
- try {
- client.dropDatabase(DB_NAME);
- } catch (Exception e) {
- }
-
- client.createDatabase(new Database(DB_NAME, "", null, null));
-
- List<FieldSchema> fields = new ArrayList<FieldSchema>();
- fields.add(new FieldSchema("colname", serdeConstants.STRING_TYPE_NAME,
""));
- Table tbl = new Table();
- tbl.setDbName(DB_NAME);
- tbl.setTableName(LONG_TABLE_NAME);
- StorageDescriptor sd = new StorageDescriptor();
- sd.setCols(fields);
- tbl.setSd(sd);
- sd.setSerdeInfo(new SerDeInfo());
-
- // Break the client
- try {
- client.createTable(tbl);
- fail("Exception was expected while creating table with long name");
- } catch (Exception e) {
- }
-
- assertFalse(client.isOpen());
- metaServer.shutDown();
- }
-
- private static class LocalMetaServer implements Runnable {
- public final int MS_PORT = 20101;
- private final HiveConf hiveConf;
- public final static int WAIT_TIME_FOR_BOOTUP = 30000;
-
- public LocalMetaServer() {
- ExitUtil.disableSystemExit();
- ExitUtil.disableSystemHalt();
- ExitUtil.resetFirstExitException();
- ExitUtil.resetFirstHaltException();
- hiveConf = new HiveConf(TestHiveClientCache.class);
- hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:"
- + MS_PORT);
-
hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3);
- hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES,
3);
- hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
- HCatSemanticAnalyzer.class.getName());
- hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, "");
- hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, "");
- hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
- "false");
- System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " ");
- System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " ");
- }
-
- public void start() throws InterruptedException {
- Thread thread = new Thread(this);
- thread.start();
- Thread.sleep(WAIT_TIME_FOR_BOOTUP); // Wait for the server to bootup
- }
-
- @Override
- public void run() {
- try {
- HiveMetaStore.main(new String[]{"-v", "-p", String.valueOf(MS_PORT)});
- } catch (Throwable t) {
- LOG.error("Exiting. Got exception from metastore: ", t);
- }
- }
-
- public HiveConf getHiveConf() {
- return hiveConf;
- }
-
- public void shutDown() {
- ExitUtil.resetFirstExitException();
- ExitUtil.resetFirstHaltException();
- }
- }
-}
diff --git
a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index 429ed56f90d..df012a76d02 100644
---
a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -114,7 +114,7 @@ public void testSequenceTableWriteReadMR() throws Exception
{
assertTrue(((InvocationTargetException)e.getCause().getCause().getCause()).getTargetException().getMessage().contains(
"Could not connect to meta store using any of the URIs provided"));
assertTrue(e.getCause().getMessage().contains(
- "Unable to instantiate
org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient"));
+ "Unable to instantiate
org.apache.hadoop.hive.metastore.HiveClientCache$CacheableHiveMetaStoreClient"));
}
assertTrue(caughtException);
}
@@ -132,7 +132,7 @@ public void map(LongWritable key, Text value, Context
context) throws IOExceptio
}
private HCatSchema getSchema() throws HCatException {
- HCatSchema schema = new HCatSchema(new ArrayList<HCatFieldSchema>());
+ HCatSchema schema = new HCatSchema(new ArrayList<>());
schema.append(new HCatFieldSchema("a0", HCatFieldSchema.Type.INT,
""));
schema.append(new HCatFieldSchema("a1",
@@ -142,5 +142,4 @@ private HCatSchema getSchema() throws HCatException {
return schema;
}
-
}
diff --git a/iceberg/iceberg-catalog/pom.xml b/iceberg/iceberg-catalog/pom.xml
index fb11670b389..50058ddfb17 100644
--- a/iceberg/iceberg-catalog/pom.xml
+++ b/iceberg/iceberg-catalog/pom.xml
@@ -39,7 +39,7 @@
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-common</artifactId>
+ <artifactId>hive-standalone-metastore-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
diff --git a/iceberg/iceberg-handler/pom.xml b/iceberg/iceberg-handler/pom.xml
index 6b62072dfeb..aa84920d29e 100644
--- a/iceberg/iceberg-handler/pom.xml
+++ b/iceberg/iceberg-handler/pom.xml
@@ -48,7 +48,7 @@
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-common</artifactId>
+ <artifactId>hive-standalone-metastore-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
diff --git a/iceberg/pom.xml b/iceberg/pom.xml
index b08885f5dc8..028b7f00c75 100644
--- a/iceberg/pom.xml
+++ b/iceberg/pom.xml
@@ -147,7 +147,7 @@
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-common</artifactId>
+ <artifactId>hive-standalone-metastore-client</artifactId>
<version>${standalone-metastore.version}</version>
</dependency>
<dependency>
diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
index 0dd989f14af..65135aad641 100644
--- a/itests/qtest-druid/pom.xml
+++ b/itests/qtest-druid/pom.xml
@@ -98,6 +98,10 @@
<artifactId>druid-hdfs-storage</artifactId>
<version>${druid.version}</version>
<exclusions>
+ <exclusion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs-client</artifactId>
+ </exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 453e35a8e52..97554ed922f 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -46,7 +46,7 @@
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-common</artifactId>
+ <artifactId>hive-standalone-metastore-client</artifactId>
<version>${standalone-metastore.version}</version>
</dependency>
<dependency>
@@ -183,6 +183,12 @@
<classifier>tests</classifier>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-standalone-metastore-server</artifactId>
+ <version>${standalone-metastore.version}</version>
+ <scope>test</scope>
+ </dependency>
<!-- test inter-project -->
<dependency>
<groupId>junit</groupId>
diff --git
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
index 7b6f597efbb..f798d2dde74 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java
@@ -52,7 +52,7 @@
/**
* A thread safe time expired cache for HiveMetaStoreClient
*/
-class HiveClientCache {
+public class HiveClientCache {
public final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2 * 60;
public final static int DEFAULT_HIVE_CACHE_INITIAL_CAPACITY = 50;
public final static int DEFAULT_HIVE_CACHE_MAX_CAPACITY = 50;
@@ -107,7 +107,7 @@ public HiveClientCache(final int timeout) {
/**
* @param timeout the length of time in seconds after a client is created
that it should be automatically removed
*/
- private HiveClientCache(final int timeout, final int initialCapacity, final
int maxCapacity, final boolean enableStats) {
+ public HiveClientCache(final int timeout, final int initialCapacity, final
int maxCapacity, final boolean enableStats) {
this.timeout = timeout;
this.enableStats = enableStats;
diff --git
a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveClientCache.java
b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveClientCache.java
new file mode 100644
index 00000000000..65dae97a4af
--- /dev/null
+++
b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveClientCache.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.junit.Test;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+public class TestHiveClientCache {
+ final HiveConf hiveConf = new HiveConf();
+
+ @Test
+ public void testCacheHit() throws IOException, MetaException, LoginException
{
+ HiveClientCache cache = new HiveClientCache(1000);
+ HiveClientCache.ICacheableMetaStoreClient client =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ assertNotNull(client);
+ client.close(); // close shouldn't matter
+
+ // Setting an unimportant configuration should return the same client
+ hiveConf.setIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS, 10);
+ HiveClientCache.ICacheableMetaStoreClient client2 =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ assertNotNull(client2);
+ assertSame(client, client2);
+ assertEquals(client.getUsers(), client2.getUsers());
+ client2.close();
+ }
+
+ @Test
+ public void testCacheMiss() throws IOException, MetaException,
LoginException {
+ HiveClientCache cache = new HiveClientCache(1000);
+ IMetaStoreClient client = cache.get(hiveConf);
+ assertNotNull(client);
+
+ // Set different uri as it is one of the criteria deciding whether to
return the same client or not
+ hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are
checked for string equivalence, even spaces make them different
+ IMetaStoreClient client2 = cache.get(hiveConf);
+ assertNotNull(client2);
+ assertNotSame(client, client2);
+ }
+
+ /**
+ * Check that a new client is returned for the same configuration after the
expiry time.
+ * Also verify that the expiry time configuration is honoured
+ */
+ @Test
+ public void testCacheExpiry() throws IOException, MetaException,
LoginException, InterruptedException {
+ HiveClientCache cache = new HiveClientCache(1);
+ HiveClientCache.ICacheableMetaStoreClient client =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ assertNotNull(client);
+
+ Thread.sleep(2500);
+ HiveClientCache.ICacheableMetaStoreClient client2 =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ client.close();
+ assertTrue(client.isClosed()); // close() after *expiry time* and *a cache
access* should have torn down the client
+
+ assertNotNull(client2);
+ assertNotSame(client, client2);
+ }
+
+ /**
+ * Check that a *new* client is created if asked from different threads even
with
+ * the same hive configuration
+ * @throws ExecutionException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testMultipleThreadAccess() throws ExecutionException,
InterruptedException {
+ final HiveClientCache cache = new HiveClientCache(1000);
+
+ class GetHiveClient implements Callable<IMetaStoreClient> {
+ @Override
+ public IMetaStoreClient call() throws IOException, MetaException,
LoginException {
+ return cache.get(hiveConf);
+ }
+ }
+ Callable<IMetaStoreClient> worker1 = new GetHiveClient();
+ Callable<IMetaStoreClient> worker2 = new GetHiveClient();
+
+ Future<IMetaStoreClient> clientFuture1, clientFuture2;
+ try (ExecutorService executor = Executors.newFixedThreadPool(2)) {
+ clientFuture1 = executor.submit(worker1);
+ clientFuture2 = executor.submit(worker2);
+ }
+ IMetaStoreClient client1 = clientFuture1.get();
+ IMetaStoreClient client2 = clientFuture2.get();
+
+ assertNotNull(client1);
+ assertNotNull(client2);
+ assertNotSame(client1, client2);
+ }
+
+ @Test
+ public void testCloseAllClients() throws IOException, MetaException,
LoginException {
+ final HiveClientCache cache = new HiveClientCache(1000);
+ HiveClientCache.ICacheableMetaStoreClient client1 =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS, " ");
// URIs are checked for string equivalence, even spaces make them different
+ HiveClientCache.ICacheableMetaStoreClient client2 =
+ (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf);
+ cache.closeAllClientsQuietly();
+ assertTrue(client1.isClosed());
+ assertTrue(client2.isClosed());
+ }
+}
diff --git a/packaging/src/main/assembly/src.xml
b/packaging/src/main/assembly/src.xml
index 88ad117c197..4dc658e1871 100644
--- a/packaging/src/main/assembly/src.xml
+++ b/packaging/src/main/assembly/src.xml
@@ -105,6 +105,7 @@
<include>shims/**/*</include>
<include>storage-api/**/*</include>
<include>standalone-metastore/metastore-common/**/*</include>
+ <include>standalone-metastore/metastore-client/**/*</include>
<include>standalone-metastore/metastore-server/**/*</include>
<include>standalone-metastore/metastore-tools/**/*</include>
<include>standalone-metastore/metastore-rest-catalog/**/*</include>
diff --git a/pom.xml b/pom.xml
index b34da788fca..e20f1fce884 100644
--- a/pom.xml
+++ b/pom.xml
@@ -97,6 +97,8 @@
<maven.build-helper.plugin.version>3.4.0</maven.build-helper.plugin.version>
<maven.eclipse.plugin.version>2.10</maven.eclipse.plugin.version>
<maven.exec.plugin.version>3.1.0</maven.exec.plugin.version>
+ <maven.compiler.plugin.version>3.14.0</maven.compiler.plugin.version>
+ <maven.javadoc.plugin.version>3.11.2</maven.javadoc.plugin.version>
<maven.versions.plugin.version>2.16.0</maven.versions.plugin.version>
<maven.shade.plugin.version>3.6.0</maven.shade.plugin.version>
<maven.surefire.plugin.version>3.5.1</maven.surefire.plugin.version>
@@ -1548,6 +1550,16 @@
<artifactId>exec-maven-plugin</artifactId>
<version>${maven.exec.plugin.version}</version>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>${maven.compiler.plugin.version}</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>${maven.javadoc.plugin.version}</version>
+ </plugin>
<plugin>
<groupId>com.github.os72</groupId>
<artifactId>protoc-jar-maven-plugin</artifactId>
diff --git a/ql/pom.xml b/ql/pom.xml
index 06c765bb986..b4fea3627e9 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -1060,6 +1060,7 @@
<include>org.apache.hive:hive-llap-client</include>
<include>org.apache.hive:hive-metastore</include>
<include>org.apache.hive:hive-standalone-metastore-common</include>
+
<include>org.apache.hive:hive-standalone-metastore-client</include>
<include>org.apache.hive:hive-standalone-metastore-server</include>
<include>org.apache.hive:hive-service-rpc</include>
<include>com.esotericsoftware.kryo:kryo5</include>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 566b9607164..0a783c81aaa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -124,9 +124,7 @@
import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.Batchable;
-import org.apache.hadoop.hive.metastore.client.HookEnabledMetaStoreClient;
-import org.apache.hadoop.hive.metastore.client.SynchronizedMetaStoreClient;
-import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
+import
org.apache.hadoop.hive.metastore.client.builder.HiveMetaStoreClientBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.RetryUtilities;
import org.apache.hadoop.hive.ql.Context;
@@ -139,7 +137,6 @@
import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -697,7 +694,7 @@ public void createDatabase(Database db) throws
AlreadyExistsException, HiveExcep
* @param name
* @throws NoSuchObjectException
* @throws HiveException
- * @see
org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDatabase(java.lang.String)
+ * @see HiveMetaStoreClient#dropDatabase(java.lang.String)
*/
public void dropDatabase(String name) throws HiveException,
NoSuchObjectException {
dropDatabase(name, true, false, false);
@@ -1024,7 +1021,7 @@ public void createDataConnector(DataConnector connector)
throws AlreadyExistsExc
* @param name
* @throws NoSuchObjectException
* @throws HiveException
- * @see
org.apache.hadoop.hive.metastore.HiveMetaStoreClient#dropDataConnector(java.lang.String,
boolean, boolean)
+ * @see HiveMetaStoreClient#dropDataConnector(java.lang.String, boolean,
boolean)
*/
public void dropDataConnector(String name, boolean ifNotExists) throws
HiveException, NoSuchObjectException {
dropDataConnector(name, ifNotExists, true);
@@ -6004,22 +6001,19 @@ public HiveMetaHook getHook(
}
};
- IMetaStoreClient thriftClient = ThriftHiveMetaStoreClient.newClient(conf,
allowEmbedded);
- IMetaStoreClient clientWithLocalCache =
HiveMetaStoreClientWithLocalCache.newClient(conf, thriftClient);
- IMetaStoreClient sessionLevelClient =
SessionHiveMetaStoreClient.newClient(conf, clientWithLocalCache);
- IMetaStoreClient clientWithHook =
HookEnabledMetaStoreClient.newClient(conf, hookLoader, sessionLevelClient);
+ HiveMetaStoreClientBuilder msClientBuilder = new
HiveMetaStoreClientBuilder(conf)
+ .newThriftClient(allowEmbedded)
+ .enhanceWith(client ->
+ HiveMetaStoreClientWithLocalCache.newClient(conf, client))
+ .enhanceWith(client ->
+ SessionHiveMetaStoreClient.newClient(conf, client))
+ .withHooks(hookLoader)
+ .threadSafe();
- if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
- return SynchronizedMetaStoreClient.newClient(conf, clientWithHook);
- } else {
- return RetryingMetaStoreClient.getProxy(
- conf,
- new Class[] {Configuration.class, IMetaStoreClient.class},
- new Object[] {conf, clientWithHook},
- metaCallTimeMap,
- SynchronizedMetaStoreClient.class.getName()
- );
+ if (!conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
+ msClientBuilder = msClientBuilder.withRetry(metaCallTimeMap);
}
+ return msClientBuilder.build();
}
@Nullable
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
index 89e5940017d..4f8fb7391ee 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
@@ -41,8 +41,8 @@
import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.client.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.metastore.client.MetaStoreClientWrapper;
+import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils;
import
org.apache.hadoop.hive.ql.metadata.client.MetaStoreClientCacheUtils.CacheKey;
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index a4789d6f93f..fee1897cca7 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -92,10 +92,10 @@
import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse;
-import org.apache.hadoop.hive.metastore.client.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.metastore.client.MetaStoreClientWrapper;
import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
@@ -135,7 +135,7 @@
import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;
import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName;
import static org.apache.hadoop.hive.metastore.Warehouse.makeValsFromName;
-import static
org.apache.hadoop.hive.metastore.client.HiveMetaStoreClientUtils.deepCopyFieldSchemas;
+import static
org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils.deepCopyFieldSchemas;
import static
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.compareFieldColumns;
import static
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable;
import static
org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
diff --git a/standalone-metastore/metastore-client/pom.xml
b/standalone-metastore/metastore-client/pom.xml
new file mode 100644
index 00000000000..f47e0546094
--- /dev/null
+++ b/standalone-metastore/metastore-client/pom.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>hive-standalone-metastore</artifactId>
+ <groupId>org.apache.hive</groupId>
+ <version>4.2.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>hive-standalone-metastore-client</artifactId>
+ <name>Hive Metastore Client</name>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-standalone-metastore-common</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <!-- test scope dependencies -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>${mockito-core.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <!-- Suppress source assembly -->
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>assemble</id>
+ <phase>none</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
similarity index 94%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 387695be670..3bc5993e537 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -28,10 +28,9 @@
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.client.HookEnabledMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.MetaStoreClientWrapper;
-import org.apache.hadoop.hive.metastore.client.SynchronizedMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
+import
org.apache.hadoop.hive.metastore.client.builder.HiveMetaStoreClientBuilder;
import org.apache.thrift.TException;
import java.util.List;
@@ -73,9 +72,11 @@ private HiveMetaStoreClient(Configuration conf,
HiveMetaHookLoader hookLoader,
private static IMetaStoreClient createUnderlyingClient(Configuration conf,
HiveMetaHookLoader hookLoader,
ThriftHiveMetaStoreClient thriftClient) {
- IMetaStoreClient clientWithHook =
HookEnabledMetaStoreClient.newClient(conf, hookLoader, thriftClient);
- IMetaStoreClient synchronizedClient =
SynchronizedMetaStoreClient.newClient(conf, clientWithHook);
- return synchronizedClient;
+ return new HiveMetaStoreClientBuilder(conf)
+ .client(thriftClient)
+ .withHooks(hookLoader)
+ .threadSafe()
+ .build();
}
// methods for test
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
similarity index 100%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
similarity index 88%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index c2fa9c99ad4..0cf9901fd2a 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -29,8 +29,9 @@
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Iterator;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -65,15 +66,21 @@ public class RetryingMetaStoreClient implements
InvocationHandler {
private final UserGroupInformation ugi;
private final int retryLimit;
private final long retryDelaySeconds;
- private final ConcurrentHashMap<String, Long> metaCallTimeMap;
+ private final Map<String, Long> metaCallTimeMap;
private final long connectionLifeTimeInMillis;
private long lastConnectionTime;
private boolean localMetaStore;
protected RetryingMetaStoreClient(Configuration conf, Class<?>[]
constructorArgTypes,
- Object[] constructorArgs,
ConcurrentHashMap<String, Long> metaCallTimeMap,
- Class<? extends IMetaStoreClient>
msClientClass) throws MetaException {
+ Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
+ Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
+ this(conf, metaCallTimeMap, () ->
+ JavaUtils.newInstance(msClientClass, constructorArgTypes,
constructorArgs));
+ }
+
+ protected RetryingMetaStoreClient(Configuration conf, Map<String, Long>
metaCallTimeMap,
+ Supplier<? extends IMetaStoreClient> msClient) throws MetaException {
this.ugi = getUGI();
@@ -93,9 +100,9 @@ protected RetryingMetaStoreClient(Configuration conf,
Class<?>[] constructorArgT
SecurityUtils.reloginExpiringKeytabUser();
- this.base = JavaUtils.newInstance(msClientClass, constructorArgTypes,
constructorArgs);
+ this.base = msClient.get();
- LOG.info("RetryingMetaStoreClient proxy=" + msClientClass + " ugi=" +
this.ugi
+ LOG.info("RetryingMetaStoreClient proxy=" + base.getClass() + " ugi=" +
this.ugi
+ " retries=" + this.retryLimit + " delay=" + this.retryDelaySeconds
+ " lifetime=" + this.connectionLifeTimeInMillis);
}
@@ -114,7 +121,7 @@ public static IMetaStoreClient getProxy(Configuration
hiveConf, HiveMetaHookLoad
}
public static IMetaStoreClient getProxy(Configuration hiveConf,
HiveMetaHookLoader hookLoader,
- ConcurrentHashMap<String, Long> metaCallTimeMap, String mscClassName,
boolean allowEmbedded)
+ Map<String, Long> metaCallTimeMap, String mscClassName, boolean
allowEmbedded)
throws MetaException {
return getProxy(hiveConf,
@@ -139,7 +146,7 @@ public static IMetaStoreClient getProxy(Configuration
hiveConf, Class<?>[] const
* Please use getProxy(HiveConf conf, HiveMetaHookLoader hookLoader) for
external purpose.
*/
public static IMetaStoreClient getProxy(Configuration hiveConf, Class<?>[]
constructorArgTypes,
- Object[] constructorArgs, ConcurrentHashMap<String, Long>
metaCallTimeMap,
+ Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
String mscClassName) throws MetaException {
@SuppressWarnings("unchecked")
@@ -149,8 +156,20 @@ public static IMetaStoreClient getProxy(Configuration
hiveConf, Class<?>[] const
RetryingMetaStoreClient handler =
new RetryingMetaStoreClient(hiveConf, constructorArgTypes,
constructorArgs,
metaCallTimeMap, baseClass);
+ return getProxy(baseClass.getInterfaces(), handler);
+ }
+
+ public static IMetaStoreClient getProxy(Configuration hiveConf, Map<String,
Long> metaCallTimeMap,
+ IMetaStoreClient msClient) throws MetaException {
+ RetryingMetaStoreClient handler =
+ new RetryingMetaStoreClient(hiveConf, metaCallTimeMap, () -> msClient);
+ return getProxy(msClient.getClass().getInterfaces(), handler);
+ }
+
+ private static IMetaStoreClient getProxy(Class<?>[] interfaces,
+ RetryingMetaStoreClient handler) {
return (IMetaStoreClient) Proxy.newProxyInstance(
- RetryingMetaStoreClient.class.getClassLoader(),
baseClass.getInterfaces(), handler);
+ RetryingMetaStoreClient.class.getClassLoader(), interfaces,
handler);
}
@Override
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
similarity index 86%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
index 1a17fe31c36..2a747aa46ef 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
@@ -30,13 +30,12 @@
* on the resulting Table objects. It batches the calls to
* IMetaStoreClient.getTableObjectsByName to avoid OOM issues in HS2 (with
* embedded metastore) or MetaStore server (if HS2 is using remote metastore).
- *
*/
public class TableIterable implements Iterable<Table> {
@Override
public Iterator<Table> iterator() {
- return new Iterator<Table>() {
+ return new Iterator<>() {
private final Iterator<String> tableNamesIter = tableNames.iterator();
private Iterator<org.apache.hadoop.hive.metastore.api.Table> batchIter =
null;
@@ -56,7 +55,7 @@ public Table next() {
private void getNextBatch() {
// get next batch of table names in this list
- List<String> nameBatch = new ArrayList<String>();
+ List<String> nameBatch = new ArrayList<>();
int batchCounter = 0;
while (batchCounter < batchSize && tableNamesIter.hasNext()) {
nameBatch.add(tableNamesIter.next());
@@ -74,13 +73,11 @@ private void getNextBatch() {
} catch (TException e) {
throw new RuntimeException(e);
}
-
}
@Override
public void remove() {
- throw new IllegalStateException(
- "TableIterable is a read-only iterable and remove() is
unsupported");
+ throw new IllegalStateException("TableIterable is a read-only iterable
and remove() is unsupported");
}
};
}
@@ -95,17 +92,11 @@ public void remove() {
* Primary constructor that fetches all tables in a given msc, given a Hive
* object,a db name and a table name list.
*/
- public TableIterable(IMetaStoreClient msc, String dbname, List<String>
tableNames, int batchSize)
- throws TException {
- this.msc = msc;
- this.catName = null;
- this.dbname = dbname;
- this.tableNames = tableNames;
- this.batchSize = batchSize;
+ public TableIterable(IMetaStoreClient msc, String dbname, List<String>
tableNames, int batchSize) {
+ this(msc, null, dbname, tableNames, batchSize);
}
- public TableIterable(IMetaStoreClient msc, String catName, String dbname,
List<String>
- tableNames, int batchSize) throws TException {
+ public TableIterable(IMetaStoreClient msc, String catName, String dbname,
List<String> tableNames, int batchSize) {
this.msc = msc;
this.catName = catName;
this.dbname = dbname;
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
similarity index 99%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
index 6b22fbf156b..5dd1703b0e8 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/BaseMetaStoreClient.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
similarity index 100%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/HookEnabledMetaStoreClient.java
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
similarity index 100%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientWrapper.java
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
similarity index 95%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
index 3ca4f7618e3..0d784246d93 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/SynchronizedMetaStoreClient.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.metastore.client;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import java.lang.reflect.InvocationHandler;
@@ -28,7 +29,7 @@
/**
* A synchronized wrapper for {@link IMetaStoreClient}.
- * The reflection logic originally comes from {@link
org.apache.hadoop.hive.metastore.HiveMetaStoreClient}.
+ * The reflection logic originally comes from {@link HiveMetaStoreClient}.
* This should be used by multi-thread applications unless all the underlying
layers are thread-safe.
*/
public class SynchronizedMetaStoreClient extends MetaStoreClientWrapper
implements IMetaStoreClient {
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
similarity index 99%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
index e88c3f04340..3036b9889ef 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/ThriftHiveMetaStoreClient.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.api.Package;
+import org.apache.hadoop.hive.metastore.client.utils.HiveMetaStoreClientUtils;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
diff --git
a/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
new file mode 100644
index 00000000000..ab213e3ea6d
--- /dev/null
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveMetaStoreClientBuilder.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.client.HookEnabledMetaStoreClient;
+import org.apache.hadoop.hive.metastore.client.SynchronizedMetaStoreClient;
+import org.apache.hadoop.hive.metastore.client.ThriftHiveMetaStoreClient;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+
+public class HiveMetaStoreClientBuilder {
+
+ private final Configuration conf;
+ private IMetaStoreClient client;
+
+ public HiveMetaStoreClientBuilder(Configuration conf) {
+ this.conf = Objects.requireNonNull(conf);
+ }
+
+ public HiveMetaStoreClientBuilder newClient() throws MetaException {
+ this.client = new HiveMetaStoreClient(conf);
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder newThriftClient(boolean allowEmbedded)
throws MetaException {
+ this.client = ThriftHiveMetaStoreClient.newClient(conf, allowEmbedded);
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder client(IMetaStoreClient client) {
+ this.client = client;
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder enhanceWith(Function<IMetaStoreClient,
IMetaStoreClient> wrapperFunction) {
+ client = wrapperFunction.apply(client);
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder withHooks(HiveMetaHookLoader hookLoader)
{
+ this.client = HookEnabledMetaStoreClient.newClient(conf, hookLoader,
client);
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder withRetry(Map<String, Long>
metaCallTimeMap) throws MetaException {
+ client = RetryingMetaStoreClient.getProxy(conf, metaCallTimeMap,
client);
+ return this;
+ }
+
+ public HiveMetaStoreClientBuilder threadSafe() {
+ this.client = SynchronizedMetaStoreClient.newClient(conf, client);
+ return this;
+ }
+
+ public IMetaStoreClient build() {
+ return Objects.requireNonNull(client);
+ }
+}
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/HiveMetaStoreClientUtils.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/utils/HiveMetaStoreClientUtils.java
similarity index 98%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/HiveMetaStoreClientUtils.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/utils/HiveMetaStoreClientUtils.java
index 1ab7e503c13..89815a15677 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/client/HiveMetaStoreClientUtils.java
+++
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/client/utils/HiveMetaStoreClientUtils.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.metastore.client;
+package org.apache.hadoop.hive.metastore.client.utils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.TableName;
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/TableFetcher.java
b/standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/utils/TableFetcher.java
similarity index 100%
rename from
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/TableFetcher.java
rename to
standalone-metastore/metastore-client/src/main/java/org/apache/hadoop/hive/metastore/utils/TableFetcher.java
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
b/standalone-metastore/metastore-client/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
similarity index 94%
rename from
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
rename to
standalone-metastore/metastore-client/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
index a63d781a28b..bbe4a780278 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
+++
b/standalone-metastore/metastore-client/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
@@ -23,9 +23,7 @@
import java.util.List;
import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
@@ -42,7 +40,6 @@
/**
* Unit tests for TableIterable.
*/
-@Category(MetastoreUnitTest.class)
public class TestTableIterable {
@Test
@@ -71,6 +68,5 @@ public void testNumReturned() throws MetaException,
InvalidOperationException,
verify(msc).getTableObjectsByName("dummy", Arrays.asList("a", "b", "c"));
verify(msc).getTableObjectsByName("dummy", Arrays.asList("d", "e", "f"));
-
}
}
diff --git
a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTableFetcher.java
b/standalone-metastore/metastore-client/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTableFetcher.java
similarity index 100%
rename from
standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTableFetcher.java
rename to
standalone-metastore/metastore-client/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTableFetcher.java
diff --git a/standalone-metastore/metastore-rest-catalog/pom.xml
b/standalone-metastore/metastore-rest-catalog/pom.xml
index b0cfdc533c7..fbe13841054 100644
--- a/standalone-metastore/metastore-rest-catalog/pom.xml
+++ b/standalone-metastore/metastore-rest-catalog/pom.xml
@@ -23,7 +23,6 @@
<standalone.metastore.path.to.root>..</standalone.metastore.path.to.root>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<log4j2.debug>false</log4j2.debug>
- <hive.version>${project.parent.version}</hive.version>
<iceberg.version>1.9.1</iceberg.version>
</properties>
<dependencies>
diff --git a/standalone-metastore/metastore-server/pom.xml
b/standalone-metastore/metastore-server/pom.xml
index cd447a51746..2014f10cfdd 100644
--- a/standalone-metastore/metastore-server/pom.xml
+++ b/standalone-metastore/metastore-server/pom.xml
@@ -27,15 +27,9 @@
<dependencies>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-standalone-metastore-common</artifactId>
+ <artifactId>hive-standalone-metastore-client</artifactId>
<version>${hive.version}</version>
</dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${hive.version}</version>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
index 24af86f4061..2756a4bdb1e 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
@@ -48,7 +48,6 @@
import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.RuntimeStatsCleanerTask;
-import org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader;
import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
import
org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService;
@@ -456,7 +455,7 @@ public void testClassNames() {
Assert.assertEquals(MetastoreConf.DEFAULT_STORAGE_SCHEMA_READER_CLASS,
DefaultStorageSchemaReader.class.getName());
Assert.assertEquals(MetastoreConf.SERDE_STORAGE_SCHEMA_READER_CLASS,
- SerDeStorageSchemaReader.class.getName());
+ "org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader");
Assert.assertEquals(MetastoreConf.HIVE_ALTER_HANDLE_CLASS,
HiveAlterHandler.class.getName());
Assert.assertEquals(MetastoreConf.MATERIALZIATIONS_REBUILD_LOCK_CLEANER_TASK_CLASS,
diff --git
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java
index 3948faaaadc..4f81b7b765f 100644
---
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java
+++
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hive.metastore.txn.retry;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.junit.Assert;
import org.junit.Test;
@@ -27,17 +28,16 @@ public class TestSqlRetryHandler {
@Test
public void testRetryableRegex() {
- HiveConf conf = new HiveConf();
+ Configuration conf = MetastoreConf.newMetastoreConf();
SQLException sqlException = new SQLException("ORA-08177: can't serialize
access for this transaction", "72000");
// Note that we have 3 regex'es below
- conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, "^Deadlock
detected, roll back,.*08177.*,.*08178.*");
+ MetastoreConf.setVar(conf,
MetastoreConf.ConfVars.TXN_RETRYABLE_SQLEX_REGEX, "^Deadlock detected, roll
back,.*08177.*,.*08178.*");
boolean result = SqlRetryHandler.isRetryable(conf, sqlException);
Assert.assertTrue("regex should be retryable", result);
sqlException = new SQLException("This error message, has comma in it");
- conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, ".*comma.*");
+ MetastoreConf.setVar(conf,
MetastoreConf.ConfVars.TXN_RETRYABLE_SQLEX_REGEX, ".*comma.*");
result = SqlRetryHandler.isRetryable(conf, sqlException);
Assert.assertTrue("regex should be retryable", result);
}
-
}
diff --git a/standalone-metastore/metastore-tools/pom.xml
b/standalone-metastore/metastore-tools/pom.xml
index a2946039bb7..bdc74cb3458 100644
--- a/standalone-metastore/metastore-tools/pom.xml
+++ b/standalone-metastore/metastore-tools/pom.xml
@@ -63,21 +63,6 @@
</exclusion>
</exclusions>
</dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.curator</groupId>
- <artifactId>curator-client</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-standalone-metastore-common</artifactId>
diff --git a/standalone-metastore/metastore-tools/tools-common/pom.xml
b/standalone-metastore/metastore-tools/tools-common/pom.xml
index 23ddb6d13e1..fb1983b317a 100644
--- a/standalone-metastore/metastore-tools/tools-common/pom.xml
+++ b/standalone-metastore/metastore-tools/tools-common/pom.xml
@@ -30,20 +30,12 @@
<groupId>org.apache.hive</groupId>
<artifactId>hive-standalone-metastore-common</artifactId>
</dependency>
- <dependency>
- <groupId>org.apache.hive.hcatalog</groupId>
- <artifactId>hive-hcatalog-server-extensions</artifactId>
- </dependency>
<!-- https://mvnrepository.com/artifact/org.jetbrains/annotations -->
<dependency>
<groupId>org.jetbrains</groupId>
<artifactId>annotations</artifactId>
<scope>compile</scope>
</dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
diff --git
a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
index 33ba54e1ca8..914dc5f8e32 100644
---
a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
+++
b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hive.metastore.tools;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
@@ -140,7 +139,7 @@ private void addResource(Configuration conf, @NotNull
String r) throws Malformed
*/
private void getClient(@Nullable URI uri)
throws TException, IOException, InterruptedException,
URISyntaxException, LoginException {
- Configuration conf = new HiveConf();
+ Configuration conf = MetastoreConf.newMetastoreConf();
addResource(conf, HIVE_SITE);
if (uri != null) {
conf.set(METASTORE_URI, uri.toString());
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 7cdd8f5b31f..7c137624b5b 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -27,6 +27,7 @@
<inceptionYear>2008</inceptionYear>
<modules>
<module>metastore-common</module>
+ <module>metastore-client</module>
<module>metastore-server</module>
<module>metastore-tools</module>
<module>metastore-rest-catalog</module>