Copilot commented on code in PR #6464:
URL: https://github.com/apache/hive/pull/6464#discussion_r3193798630
##########
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java:
##########
@@ -648,6 +648,13 @@ public enum ConfVars {
"hive.txn.acid.metrics.delta.pct.threshold", 0.01f,
"Percentage (fractional) size of the delta files relative to the base
directory. Deltas smaller than this threshold " +
"count as small deltas. Default 0.01 = 1%.)"),
+
METASTORE_JDBC_SLOW_QUERIES("metastore.jdbc.execution,logSlowQueriesThreshold",
"metastore.jdbc.execution,logSlowQueriesThreshold",
+ 5000, "Log the slow jdbc query that Metastore has been waiting for the
result beyond the threshold(ms), " +
+ "should enable the metastore.profile.jdbc.execution first"),
+ METASTORE_PROFILE_JDBC_EXECUTION("metastore.profile.jdbc.execution",
"metastore.profile.jdbc.execution", true,
Review Comment:
METASTORE_PROFILE_JDBC_EXECUTION defaults to true, which makes
DataSourceProvider route JDBC URLs through MetastoreDriver by default. This is
a user-facing/operational behavior change (a different driver plus proxy
wrappers) and adds overhead; consider defaulting it to false so that the
feature requires explicit opt-in.
##########
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java:
##########
@@ -648,6 +648,13 @@ public enum ConfVars {
"hive.txn.acid.metrics.delta.pct.threshold", 0.01f,
"Percentage (fractional) size of the delta files relative to the base
directory. Deltas smaller than this threshold " +
"count as small deltas. Default 0.01 = 1%.)"),
+
METASTORE_JDBC_SLOW_QUERIES("metastore.jdbc.execution,logSlowQueriesThreshold",
"metastore.jdbc.execution,logSlowQueriesThreshold",
Review Comment:
Conf var key for METASTORE_JDBC_SLOW_QUERIES uses a comma
("metastore.jdbc.execution,logSlowQueriesThreshold") instead of a dot-separated
property name. This will make the setting impossible to configure via standard
XML/property files. Rename the key(s) to a valid dot-separated name (and keep
the hive/metastore alias consistent).
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreConnection.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public record MetastoreConnection(Connection delegate, Configuration
configuration) implements Connection {
+ private static final Logger LOG =
LoggerFactory.getLogger(MetastoreConnection.class);
+ @Override
+ public Statement createStatement() throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.createStatement(), null);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareStatement(sql), sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareCall(sql), sql);
+ }
+
+ @Override
+ public String nativeSQL(String sql) throws SQLException {
+ return delegate.nativeSQL(sql);
+ }
+
+ @Override
+ public void setAutoCommit(boolean autoCommit) throws SQLException {
+ delegate.setAutoCommit(autoCommit);
+ }
+
+ @Override
+ public boolean getAutoCommit() throws SQLException {
+ return delegate.getAutoCommit();
+ }
+
+ @Override
+ public void commit() throws SQLException {
+ delegate.commit();
+ }
+
+ @Override
+ public void rollback() throws SQLException {
+ delegate.rollback();
+ }
+
  @Override
  public void close() throws SQLException {
    // Log a stack trace at debug level so we can tell who closed the
    // connection, and when.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connection is being closed", new RuntimeException());
    }
    delegate.close();
  }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return delegate.isClosed();
+ }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException {
+ return delegate.getMetaData();
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException {
+ delegate.setReadOnly(readOnly);
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException {
+ return delegate.isReadOnly();
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException {
+ delegate.setCatalog(catalog);
+ }
+
+ @Override
+ public String getCatalog() throws SQLException {
+ return delegate.getCatalog();
+ }
+
+ @Override
+ public void setTransactionIsolation(int level) throws SQLException {
+ delegate.setTransactionIsolation(level);
+ }
+
+ @Override
+ public int getTransactionIsolation() throws SQLException {
+ return delegate.getTransactionIsolation();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ return delegate.getWarnings();
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException {
+ delegate.clearWarnings();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int
resultSetConcurrency) throws SQLException {
+ return delegate.createStatement(resultSetType, resultSetConcurrency);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int
resultSetConcurrency)
+ throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.prepareStatement(sql, resultSetType, resultSetConcurrency),
sql);
Review Comment:
MetastoreConnection.createStatement(int, int) returns the delegate Statement
directly, bypassing MetastoreStatement profiling/wrapping that is applied in
the other createStatement overloads. Wrap this overload as well so profiling
behavior is consistent.
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.atomic.LongAdder;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+ private static final Logger LOG =
LoggerFactory.getLogger(MetastoreStatement.class);
+ static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+ static final ThreadLocal<Pair<Pair<String, Long>, LongAdder>> CURRENT_CALL =
new ThreadLocal<>();
+
+ private final String rawSql;
+ private final Statement delegate;
+ private final Configuration configuration;
+ private final MetastoreStatementHook hook;
+
+ private MetastoreStatement(Configuration conf, Statement statement, String
rawSql) {
+ this.configuration = Objects.requireNonNull(conf);
+ this.rawSql = rawSql;
+ this.delegate = Objects.requireNonNull(statement);
+ String className = conf.get(EXEC_HOOK, "");
+ if (StringUtils.isEmpty(className)) {
+ hook = new JdbcProfilerUtils(conf);
+ } else {
+ try {
+ hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(),
MetastoreStatementHook.class),
+ new Class[] { Configuration.class}, new Object[] {conf});
+ } catch (MetaException e) {
+ throw new RuntimeException(e.getMessage());
+ }
+ }
+ }
+
+ public static <T extends Statement> T getProxyStatement(Configuration
configuration, T delegate, String rawSql) {
+ MetastoreStatement handler = new MetastoreStatement(configuration,
delegate, rawSql);
+ return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+ ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new
Class[0]), handler);
+ }
+
  /**
   * Intercepts every call on the proxied {@link Statement}: optionally times it
   * with a metrics Timer, accumulates per-thrift-call JDBC time in a
   * thread-local, and logs slow executions.
   * NOTE(review): the previous-call flush below relies on the next JDBC call of
   * the same thread to detect the end of the prior thrift call; the last call
   * on a thread is never flushed this way — confirm that is acceptable.
   */
  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Timer.Context ctx = null;
    try {
      LongAdder adder = null;
      // The hook decides whether this particular method/SQL should be profiled.
      boolean shouldMonitor = hook.profile(rawSql, method, args);
      if (Metrics.getRegistry() != null && shouldMonitor) {
        Optional<Pair<String, Long>> ctxCall = HMSHandlerContext.getCallId();
        Pair<Pair<String, Long>, LongAdder> previousCall = CURRENT_CALL.get();
        if (ctxCall.isPresent()) {
          // A new (different) thrift call id on this thread means the previous
          // thrift call has finished all of its JDBC work.
          if ((previousCall == null || !ctxCall.get().equals(previousCall.getLeft()))) {
            // we approach the end of previous thrift call
            if (previousCall != null) {
              Pair<String, Long> call = previousCall.getLeft();
              long totalSpent = previousCall.getRight().longValue();
              LOG.debug("{} took {} ms to complete all jdbc queries", call.getLeft(), totalSpent);
              if (isSlowExecution(configuration, totalSpent)) {
                LOG.info("{} took {} ms to complete all jdbc queries", call.getLeft(), totalSpent);
              }
            }
            // Start accumulating JDBC time for the new thrift call.
            adder = new LongAdder();
            CURRENT_CALL.set(Pair.of(ctxCall.get(), adder));
          } else {
            // Same thrift call as before: keep adding to its accumulator.
            adder = previousCall.getRight();
          }
        }
        String metricName = hook.getMetricName(method, args);
        Timer timer = Metrics.getOrCreateTimer(metricName);
        if (timer != null) {
          ctx = timer.time();
        }
      }
      long start = System.currentTimeMillis();
      hook.preRun(method, args);
      Object result = method.invoke(delegate, args);
      hook.postRun(method, args, result);
      long timeSpent = System.currentTimeMillis() - start;
      if (shouldMonitor) {
        // For plain Statement.execute*(sql) calls rawSql is null and the SQL
        // text is the first method argument.
        String statement = rawSql != null ? rawSql : (args != null && args.length > 0 ? (String) args[0] : "no sql found");
        LOG.debug("SQL query: {} completed in {} ms", statement, timeSpent);
      }
      logSlowExecution(timeSpent, configuration, rawSql, method, args);
      if (adder != null) {
        adder.add(timeSpent);
      }
      return result;
    } catch (InvocationTargetException | UndeclaredThrowableException e) {
      // Unwrap reflection wrappers so callers see the delegate's real exception.
      throw e.getCause();
    } finally {
      if (ctx != null) {
        ctx.stop();
      }
    }
  }
+
  public interface MetastoreStatementHook {
    /**
     * Whether the current call should be monitored; this method gives a chance to
     * profile a specific pattern of queries.
     * For example, {@link JdbcProfilerUtils} is used to profile queries
     * originating from a set of specific APIs.
     * @param sql The sql being executed; it might be null for {@link Statement#execute},
     *            in which case the sql must be obtained from {@code args}, the method input.
     * @param method Method which is being called
     * @param args The method input
     * @return true to profile this call, false otherwise
     */
    boolean profile(String sql, Method method, Object[] args);

    /**
     * Returns the metrics name under which this call's timing is recorded.
     * @param method Method which is being called
     * @param args The method input
     */
    String getMetricName(Method method, Object[] args);
    /**
     * Invoked before the method call.
     * @param method Method which is being called
     * @param args The method input
     */
    default void preRun(Method method, Object[] args) {

    }

    /**
     * Invoked after the method call.
     * @param method Method which is being called
     * @param args The method input
     * @param result The execution result from the call
     */
    default void postRun(Method method, Object[] args, Object result) {

    }
  }
+
+ /**
+ * This class is used to profile the underlying statement originated from
specific thrift API calls
+ */
+ public static class JdbcProfilerUtils implements
MetastoreStatement.MetastoreStatementHook {
+ private static final Set<String> PROFILED_APIS = new HashSet<>();
+ static final Set<String> QUERY_EXECUTION =
+ Set.of("executeQuery", "executeUpdate", "execute", "executeBatch");
+ private static volatile boolean initialized = false;
+ private static long logSlowQueriesThreshold;
+
+ public JdbcProfilerUtils(Configuration configuration) {
+ initialize(Objects.requireNonNull(configuration));
+ }
+
+ private static void initialize(Configuration configuration) {
+ if (!initialized) {
+ synchronized (JdbcProfilerUtils.class) {
+ if (!initialized) {
+ initialized = true;
+ logSlowQueriesThreshold = MetastoreConf.getLongVar(configuration,
+ MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES);
+ if (logSlowQueriesThreshold > 0) {
+ LOG.info("The slow query log enabled, will log the query that
takes more than {} ms",
+ logSlowQueriesThreshold);
+ }
+ String thriftApis = MetastoreConf.getVar(configuration,
+ MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS);
+ if (StringUtils.isNotEmpty(thriftApis)) {
+ PROFILED_APIS.addAll(Arrays.asList(thriftApis.split(",")));
+ }
+ }
+ }
+ }
+ }
+
+ public static void logSlowExecution(long timeSpent, Configuration
configuration,
+ String sql, Method method, Object[] args) {
+ if (isSlowExecution(configuration, timeSpent)) {
+ Object[] printableArgs = args;
+ if (args != null && args.length > 10) {
+ printableArgs = new Object[10];
+ System.arraycopy(args, 0, printableArgs, 0, 7);
+ System.arraycopy(args, args.length - 2, printableArgs, 8, 2);
+ args[7] = "....";
Review Comment:
logSlowExecution truncation logic writes the placeholder string into args[7]
instead of printableArgs[7], mutating the original invocation arguments and
leaving printableArgs with a null hole. Set the placeholder on printableArgs
(and avoid mutating the original args array).
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java:
##########
@@ -88,8 +92,18 @@ public Result invokeInternal(final Object proxy, final
Method method, final Obje
Object object = null;
boolean isStarted = Deadline.startTimer(method.getName());
try {
+ if (!local) {
+ Pair<String, Long> currentCall = Pair.of(method.getName(),
System.currentTimeMillis());
+ Optional<Pair<String, Long>> previous =
HMSHandlerContext.getCallId();
+ previous.ifPresent(
+ pc -> LOG.debug("Previous call {} will be taken over by {}",
pc, currentCall));
+ HMSHandlerContext.setCallId(currentCall);
+ }
object = method.invoke(baseHandler, args);
} finally {
+ if (!local) {
+ HMSHandlerContext.setCallId(null);
Review Comment:
In RetryingHMSHandler.invokeInternal, callId is always cleared to null in
finally, even if there was a previous value. This can break nested/stacked
contexts and loses the previous callId unexpectedly. Save the previous callId
before setting and restore it in finally.
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreConnection.java:
##########
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public record MetastoreConnection(Connection delegate, Configuration
configuration) implements Connection {
+ private static final Logger LOG =
LoggerFactory.getLogger(MetastoreConnection.class);
+ @Override
+ public Statement createStatement() throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.createStatement(), null);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareStatement(sql), sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareCall(sql), sql);
+ }
+
+ @Override
+ public String nativeSQL(String sql) throws SQLException {
+ return delegate.nativeSQL(sql);
+ }
+
+ @Override
+ public void setAutoCommit(boolean autoCommit) throws SQLException {
+ delegate.setAutoCommit(autoCommit);
+ }
+
+ @Override
+ public boolean getAutoCommit() throws SQLException {
+ return delegate.getAutoCommit();
+ }
+
+ @Override
+ public void commit() throws SQLException {
+ delegate.commit();
+ }
+
+ @Override
+ public void rollback() throws SQLException {
+ delegate.rollback();
+ }
+
  @Override
  public void close() throws SQLException {
    // Log a stack trace at debug level so we can tell who closed the
    // connection, and when.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connection is being closed", new RuntimeException());
    }
    delegate.close();
  }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return delegate.isClosed();
+ }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException {
+ return delegate.getMetaData();
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException {
+ delegate.setReadOnly(readOnly);
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException {
+ return delegate.isReadOnly();
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException {
+ delegate.setCatalog(catalog);
+ }
+
+ @Override
+ public String getCatalog() throws SQLException {
+ return delegate.getCatalog();
+ }
+
+ @Override
+ public void setTransactionIsolation(int level) throws SQLException {
+ delegate.setTransactionIsolation(level);
+ }
+
+ @Override
+ public int getTransactionIsolation() throws SQLException {
+ return delegate.getTransactionIsolation();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ return delegate.getWarnings();
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException {
+ delegate.clearWarnings();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int
resultSetConcurrency) throws SQLException {
+ return delegate.createStatement(resultSetType, resultSetConcurrency);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int
resultSetConcurrency)
+ throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.prepareStatement(sql, resultSetType, resultSetConcurrency),
sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql, int resultSetType, int
resultSetConcurrency) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.prepareCall(sql, resultSetType, resultSetConcurrency), sql);
+ }
+
+ @Override
+ public Map<String, Class<?>> getTypeMap() throws SQLException {
+ return delegate.getTypeMap();
+ }
+
+ @Override
+ public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+ delegate.setTypeMap(map);
+ }
+
+ @Override
+ public void setHoldability(int holdability) throws SQLException {
+ delegate.setHoldability(holdability);
+ }
+
+ @Override
+ public int getHoldability() throws SQLException {
+ return delegate.getHoldability();
+ }
+
+ @Override
+ public Savepoint setSavepoint() throws SQLException {
+ return delegate.setSavepoint();
+ }
+
+ @Override
+ public Savepoint setSavepoint(String name) throws SQLException {
+ return delegate.setSavepoint(name);
+ }
+
+ @Override
+ public void rollback(Savepoint savepoint) throws SQLException {
+ delegate.rollback(savepoint);
+ }
+
+ @Override
+ public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+ delegate.releaseSavepoint(savepoint);
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int
resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.createStatement(resultSetType, resultSetConcurrency,
resultSetHoldability), null);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int
resultSetConcurrency,
+ int resultSetHoldability) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.prepareStatement(sql, resultSetType, resultSetConcurrency,
resultSetHoldability), sql);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql, int resultSetType, int
resultSetConcurrency,
+ int resultSetHoldability) throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
+ delegate.prepareCall(sql, resultSetType, resultSetConcurrency,
resultSetHoldability), sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareStatement(sql, autoGeneratedKeys), sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareStatement(sql, columnIndexes), sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, String[] columnNames)
throws SQLException {
+ return MetastoreStatement.getProxyStatement(configuration,
delegate.prepareStatement(sql, columnNames), sql);
+ }
+
+ @Override
+ public Clob createClob() throws SQLException {
+ return delegate.createClob();
+ }
+
+ @Override
+ public Blob createBlob() throws SQLException {
+ return delegate.createBlob();
+ }
+
+ @Override
+ public NClob createNClob() throws SQLException {
+ return delegate.createNClob();
+ }
+
+ @Override
+ public SQLXML createSQLXML() throws SQLException {
+ return delegate.createSQLXML();
+ }
+
+ @Override
+ public boolean isValid(int timeout) throws SQLException {
+ return delegate.isValid(timeout);
+ }
+
+ @Override
+ public void setClientInfo(String name, String value) throws
SQLClientInfoException {
+ delegate.setClientInfo(name, value);
+ }
+
+ @Override
+ public void setClientInfo(Properties properties) throws
SQLClientInfoException {
+ delegate.setClientInfo(properties);
+ }
+
+ @Override
+ public String getClientInfo(String name) throws SQLException {
+ return delegate.getClientInfo(name);
+ }
+
+ @Override
+ public Properties getClientInfo() throws SQLException {
+ return delegate.getClientInfo();
+ }
+
+ @Override
+ public Array createArrayOf(String typeName, Object[] elements) throws
SQLException {
+ return delegate.createArrayOf(typeName, elements);
+ }
+
+ @Override
+ public Struct createStruct(String typeName, Object[] attributes) throws
SQLException {
+ return delegate.createStruct(typeName, attributes);
+ }
+
+ @Override
+ public void setSchema(String schema) throws SQLException {
+ delegate.setSchema(schema);
+ }
+
+ @Override
+ public String getSchema() throws SQLException {
+ return delegate.getSchema();
+ }
+
+ @Override
+ public void abort(Executor executor) throws SQLException {
+ delegate.abort(executor);
+ }
+
+ @Override
+ public void setNetworkTimeout(Executor executor, int milliseconds) throws
SQLException {
+ delegate.setNetworkTimeout(executor, milliseconds);
+ }
+
+ @Override
+ public int getNetworkTimeout() throws SQLException {
+ return delegate.getNetworkTimeout();
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+ return delegate.unwrap(iface);
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+ return delegate.isWrapperFor(iface);
Review Comment:
MetastoreConnection.unwrap/isWrapperFor delegates directly to the underlying
connection, so unwrapping to MetastoreConnection (or checking wrapper status)
will fail even though the current object is that wrapper. Implement
unwrap/isWrapperFor to return/recognize the current wrapper when iface matches,
and only delegate otherwise.
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/DbCPDataSourceProvider.java:
##########
@@ -127,10 +119,8 @@ public DataSource create(Configuration hdpConfig, int
maxPoolSize) throws SQLExc
objectPool.setSoftMinEvictableIdleDuration(Duration.ofMillis(600 *
1000));
}
}
- String stmt = dbProduct.getPrepareTxnStmt();
- if (stmt != null) {
-
poolableConnFactory.setConnectionInitSql(Collections.singletonList(stmt));
- }
+ DataSourceProvider.preparePool(hdpConfig, stmt ->
poolableConnFactory.setConnectionInitSql(Collections.singletonList(stmt)),
+ kv -> dbcpDs.setConnectionProperties(kv.getKey() + "=" +
kv.getValue()));
Review Comment:
DbCPDataSourceProvider passes each datasource property to
BasicDataSource.setConnectionProperties, which replaces the entire property
string each time. For databases that return multiple properties (e.g., MySQL),
only the last entry will be kept. Accumulate into a single connectionProperties
string (with proper separators) or use an API that adds individual properties
if available.
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreDriver.java:
##########
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.math.NumberUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.slf4j.LoggerFactory;
+
+import static java.sql.DriverManager.registerDriver;
+
+public class MetastoreDriver implements Driver {
+ private static final org.slf4j.Logger LOG =
LoggerFactory.getLogger(MetastoreDriver.class);
+ private static final String URL_PREFIX = "jdbc:metastore://";
+ private static int majorVersion = -1;
+ private static int minorVersion = -1;
+ private static final Configuration configuration;
+ private static final Driver delegateDriver;
+ private static final String jdbcUrl;
+ static {
+ try {
+ registerDriver(new MetastoreDriver());
+ String versionString = MetastoreVersionInfo.getVersion();
+ String[] versionNums = versionString.split("\\.");
+ if (NumberUtils.isNumber(versionNums[0])) {
+ majorVersion = Integer.parseInt(versionNums[0]);
+ }
+ if (versionNums.length >1 && NumberUtils.isNumber(versionNums[1])) {
+ minorVersion = Integer.parseInt(versionNums[1]);
+ }
+ configuration = MetastoreConf.newMetastoreConf();
+ jdbcUrl = MetastoreConf.getVar(configuration,
MetastoreConf.ConfVars.CONNECT_URL_KEY);
+ delegateDriver = findRegisteredDriver(configuration);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to register Metastore driver", e);
Review Comment:
MetastoreDriver caches CONNECT_URL_KEY and the delegate Driver in static
finals at class-load time and connect() ignores the incoming url, always
connecting to the cached jdbcUrl. This breaks cases where the metastore updates
the connect URL at runtime (e.g., RetryingHMSHandler reloadConf /
MetaStoreInit.updateConnectionURL) or where different Configuration instances
should point to different metastore DBs. Consider deriving the delegate
URL/driver per connect() call (e.g., encode the real JDBC URL in the metastore
JDBC URL or pass it via Properties) instead of using static cached values.
##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.atomic.LongAdder;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+ private static final Logger LOG =
LoggerFactory.getLogger(MetastoreStatement.class);
+ static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+ static final ThreadLocal<Pair<Pair<String, Long>, LongAdder>> CURRENT_CALL =
new ThreadLocal<>();
+
+ private final String rawSql;
+ private final Statement delegate;
+ private final Configuration configuration;
+ private final MetastoreStatementHook hook;
+
+ private MetastoreStatement(Configuration conf, Statement statement, String
rawSql) {
+ this.configuration = Objects.requireNonNull(conf);
+ this.rawSql = rawSql;
+ this.delegate = Objects.requireNonNull(statement);
+ String className = conf.get(EXEC_HOOK, "");
+ if (StringUtils.isEmpty(className)) {
+ hook = new JdbcProfilerUtils(conf);
+ } else {
+ try {
+ hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(),
MetastoreStatementHook.class),
+ new Class[] { Configuration.class}, new Object[] {conf});
+ } catch (MetaException e) {
+ throw new RuntimeException(e.getMessage());
Review Comment:
MetastoreStatement constructor wraps MetaException as new
RuntimeException(e.getMessage()) which drops the original cause/stack trace.
Re-throw with the original exception as the cause to preserve debugging context.
##########
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestMetastoreConnection.java:
##########
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import org.apache.commons.dbcp2.PoolingDataSource;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.derby.impl.jdbc.EmbedConnection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MetastoreUnitTest.class)
+public class TestMetastoreConnection {
+ private Configuration conf;
+ private Counter slowQuery;
+
+ @Before
+ public void init() {
+ conf = MetastoreDriver.getConfiguration();
+ conf.set(MetastoreStatement.EXEC_HOOK,
MetastoreStatementTestHook.class.getName());
+ MetastoreConf.setLongVar(conf,
MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES, 1000);
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED,
true);
+ MetastoreConf.setBoolVar(conf,
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_EXECUTION, true);
+ MetastoreConf.setVar(conf,
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS,
"test_metastore_statement");
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME,
"dummyUser");
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "dummyPass");
+ conf.unset(MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.getVarname());
+
+ Metrics.initialize(conf);
+ slowQuery = Metrics.getOrCreateCounter(MetricsConstants.JDBC_SLOW_QUERIES);
+ }
+
+ @Test
+ public void testDefaultHikariCp() throws Exception {
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE,
HikariCPDataSourceProvider.HIKARI);
+
+ DataSourceProvider dsp =
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+ Assert.assertNotNull(dsp);
+ DataSource ds = dsp.create(conf);
+ Assert.assertTrue(ds instanceof HikariDataSource);
+ verify(ds.getConnection());
+ }
+
+ @Test
+ public void testDbCpDataSource() throws Exception {
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE,
DbCPDataSourceProvider.DBCP);
+
+ DataSourceProvider dsp =
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+ Assert.assertNotNull(dsp);
+ DataSource ds = dsp.create(conf);
+ Assert.assertTrue(ds instanceof PoolingDataSource);
+ verify(ds.getConnection());
+ }
+
+ private void verify(Connection connection) throws Exception {
+ Assert.assertTrue(connection.unwrap(MetastoreConnection.class).delegate()
instanceof EmbedConnection);
+ long slowNum = slowQuery.getCount();
+ Timer timer =
Metrics.getOrCreateTimer(MetastoreStatementTestHook.TEST_METRIC_NAME);
+ Assert.assertNotNull(timer);
+ long timeCount = timer.getCount();
+ try (AutoCloseable sleep =
MetastoreStatementTestHook.testConnection("test_metastore_statement", 1500)) {
+ try (Statement statement = connection.createStatement();
+ ResultSet rs = statement.executeQuery("VALUES 1")) {
+ Assert.assertTrue(rs.next());
+ }
+ }
+ Assert.assertEquals(slowNum + 1, slowQuery.getCount());
+ Assert.assertEquals(timeCount + 1, timer.getCount());
+ Assert.assertTrue(timer.getSnapshot().getMean() > 1000);
+
+ // Test a method outside of monitor
+ try (AutoCloseable sleep =
MetastoreStatementTestHook.testConnection("test_statement_outside", 1500)) {
+ try (Statement statement = connection.createStatement();
+ ResultSet rs = statement.executeQuery("VALUES 1")) {
Review Comment:
These unit tests add at least ~3 seconds of wall-clock sleep per invocation
of verify() (two 1500ms sleeps), and verify() runs in two tests, making the
suite noticeably slower and potentially flaky under load. Consider lowering the
slow-query threshold and using shorter sleeps, or using a controllable
clock/test hook that avoids long Thread.sleep in unit tests.
##########
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestMetastoreConnection.java:
##########
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+import org.apache.commons.dbcp2.PoolingDataSource;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.derby.impl.jdbc.EmbedConnection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MetastoreUnitTest.class)
+public class TestMetastoreConnection {
+ private Configuration conf;
+ private Counter slowQuery;
+
+ @Before
+ public void init() {
+ conf = MetastoreDriver.getConfiguration();
+ conf.set(MetastoreStatement.EXEC_HOOK,
MetastoreStatementTestHook.class.getName());
+ MetastoreConf.setLongVar(conf,
MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES, 1000);
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED,
true);
+ MetastoreConf.setBoolVar(conf,
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_EXECUTION, true);
+ MetastoreConf.setVar(conf,
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS,
"test_metastore_statement");
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME,
"dummyUser");
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "dummyPass");
+ conf.unset(MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.getVarname());
+
+ Metrics.initialize(conf);
+ slowQuery = Metrics.getOrCreateCounter(MetricsConstants.JDBC_SLOW_QUERIES);
+ }
+
+ @Test
+ public void testDefaultHikariCp() throws Exception {
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE,
HikariCPDataSourceProvider.HIKARI);
+
+ DataSourceProvider dsp =
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+ Assert.assertNotNull(dsp);
+ DataSource ds = dsp.create(conf);
+ Assert.assertTrue(ds instanceof HikariDataSource);
+ verify(ds.getConnection());
+ }
+
+ @Test
+ public void testDbCpDataSource() throws Exception {
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE,
DbCPDataSourceProvider.DBCP);
+
+ DataSourceProvider dsp =
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+ Assert.assertNotNull(dsp);
+ DataSource ds = dsp.create(conf);
+ Assert.assertTrue(ds instanceof PoolingDataSource);
+ verify(ds.getConnection());
+ }
+
+ private void verify(Connection connection) throws Exception {
+ Assert.assertTrue(connection.unwrap(MetastoreConnection.class).delegate()
instanceof EmbedConnection);
+ long slowNum = slowQuery.getCount();
+ Timer timer =
Metrics.getOrCreateTimer(MetastoreStatementTestHook.TEST_METRIC_NAME);
+ Assert.assertNotNull(timer);
+ long timeCount = timer.getCount();
+ try (AutoCloseable sleep =
MetastoreStatementTestHook.testConnection("test_metastore_statement", 1500)) {
+ try (Statement statement = connection.createStatement();
+ ResultSet rs = statement.executeQuery("VALUES 1")) {
+ Assert.assertTrue(rs.next());
+ }
+ }
+ Assert.assertEquals(slowNum + 1, slowQuery.getCount());
+ Assert.assertEquals(timeCount + 1, timer.getCount());
+ Assert.assertTrue(timer.getSnapshot().getMean() > 1000);
+
+ // Test a method outside of monitor
+ try (AutoCloseable sleep =
MetastoreStatementTestHook.testConnection("test_statement_outside", 1500)) {
+ try (Statement statement = connection.createStatement();
+ ResultSet rs = statement.executeQuery("VALUES 1")) {
+ Assert.assertTrue(rs.next());
+ }
+ }
+ // record the slow query though
+ Assert.assertEquals(slowNum + 2, slowQuery.getCount());
+ // don't count this un-interested method
+ Assert.assertEquals(timeCount + 1, timer.getCount());
+ }
+
+ public static class MetastoreStatementTestHook extends
MetastoreStatement.JdbcProfilerUtils {
+ static final String TEST_METRIC_NAME = "MetastoreStatementTestHook_" +
System.currentTimeMillis();
+ static final String ENABLE_SLEEP_FOR_QUERY =
"MetastoreStatementTestHook.should.sleep";
+ static final String SLEEP_MILLIS = "MetastoreStatementTestHook.sleep.ms";
+ private final boolean shouldSleep;
+ private final long sleepMs;
/**
 * Reads the sleep toggles from the configuration; when enabled, the hook
 * delays matching statement executions by {@code sleepMs} milliseconds.
 */
public MetastoreStatementTestHook(Configuration configuration) {
  super(configuration);
  this.shouldSleep = configuration.getBoolean(ENABLE_SLEEP_FOR_QUERY, false);
  this.sleepMs = configuration.getLong(SLEEP_MILLIS, 1000);
}
+
+ @Override
+ public void preRun(Method method, Object[] args) {
+ if (shouldSleep &&
+
MetastoreStatement.JdbcProfilerUtils.QUERY_EXECUTION.contains(method.getName()))
{
+ try {
+ Thread.sleep(sleepMs);
+ } catch (InterruptedException e) {
Review Comment:
MetastoreStatementTestHook.preRun swallows InterruptedException without
restoring the interrupt status. Even in tests, it’s better to call
Thread.currentThread().interrupt() (and/or propagate) to avoid hiding
interruptions.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]