This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
     new 56d76cd  Revert "HIVE-23998: Upgrade guava to 27 for Hive 2.3 branch (#1365)"
56d76cd is described below

commit 56d76cd896df7db2e5e2bbdb8a711dafe4c2afcd
Author: Chao Sun <sunc...@apache.org>
AuthorDate: Tue Aug 11 21:28:08 2020 -0700

    Revert "HIVE-23998: Upgrade guava to 27 for Hive 2.3 branch (#1365)"
    
    This reverts commit cb69a2d5f99b5eff0047717f631da22372f8c6d0.
---
 .../org/apache/hadoop/hive/common/JvmMetricsInfo.java   |  4 ++--
 .../org/apache/hadoop/hive/common/JvmPauseMonitor.java  |  2 +-
 .../apache/hadoop/hive/druid/DruidStorageHandler.java   | 17 -----------------
 .../hadoop/hive/druid/serde/DruidQueryRecordReader.java |  5 ++---
 .../hive/druid/serde/DruidSelectQueryRecordReader.java  |  4 ++--
 .../hive/druid/serde/DruidTopNQueryRecordReader.java    |  4 ++--
 .../org/apache/hadoop/hive/io/TestHadoopFileStatus.java |  8 --------
 .../java/org/apache/hive/jdbc/TestServiceDiscovery.java |  6 ------
 .../apache/hadoop/hive/cli/control/CoreCliDriver.java   |  2 +-
 .../sqlstd/SQLStdHiveAuthorizationValidatorForTest.java |  9 ---------
 .../hadoop/hive/util/ElapsedTimeLoggingWrapper.java     |  2 +-
 .../hadoop/hive/llap/tez/LlapProtocolClientProxy.java   |  4 ++--
 .../apache/hadoop/hive/llap/daemon/impl/AMReporter.java |  6 +++---
 .../hadoop/hive/llap/daemon/impl/LlapTaskReporter.java  |  2 +-
 .../hive/llap/daemon/impl/TaskExecutorService.java      |  2 +-
 .../hive/llap/daemon/impl/TaskRunnerCallable.java       | 10 +++++-----
 .../hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java   |  5 +++--
 .../hive/llap/metrics/LlapDaemonExecutorInfo.java       |  5 +++--
 .../hadoop/hive/llap/metrics/LlapDaemonIOInfo.java      |  5 +++--
 .../hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java     |  5 +++--
 .../hive/llap/tezplugins/LlapTaskSchedulerService.java  |  6 +++---
 .../llap/tezplugins/metrics/LlapTaskSchedulerInfo.java  |  5 +++--
 .../hive/metastore/hbase/TephraHBaseConnection.java     |  3 +--
 pom.xml                                                 |  2 +-
 .../org/apache/hadoop/hive/ql/exec/FetchOperator.java   |  5 ++---
 .../org/apache/hadoop/hive/ql/hooks/LineageLogger.java  |  2 +-
 .../hadoop/hive/ql/metadata/HiveMetaStoreChecker.java   |  2 +-
 .../calcite/rules/views/SubstitutionVisitor.java        |  4 ----
 .../ql/optimizer/calcite/stats/HiveRelMdPredicates.java |  3 +--
 .../physical/AnnotateRunTimeStatsOptimizer.java         | 14 ++++++++++----
 .../apache/hadoop/hive/ql/parse/ReplicationSpec.java    | 10 +---------
 .../apache/hive/service/cli/session/SessionManager.java |  5 -----
 .../main/java/org/apache/hadoop/hive/io/HdfsUtils.java  |  7 -------
 .../org/apache/hive/spark/client/MetricsCollection.java | 12 ------------
 34 files changed, 59 insertions(+), 128 deletions(-)
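
The diff below is almost entirely mechanical: it swaps Guava 27 idioms back to
their Guava 14.0.1 equivalents. As a rough orientation, here is a minimal
illustrative sketch (not part of the patch) of the restored 14.0.1 APIs, with
the Guava 27 forms noted in comments:

    import java.util.Iterator;

    import com.google.common.base.Objects;      // Guava 27: com.google.common.base.MoreObjects
    import com.google.common.base.Stopwatch;
    import com.google.common.collect.Iterators;

    public class Guava14Sketch {
      public static void main(String[] args) {
        Stopwatch sw = new Stopwatch().start();          // Guava 27: Stopwatch.createStarted()
        long ms = sw.elapsedMillis();                    // Guava 27: sw.elapsed(TimeUnit.MILLISECONDS)
        Iterator<String> it = Iterators.emptyIterator(); // Guava 27: Collections.emptyIterator()
        // Guava 27: MoreObjects.toStringHelper(...)
        System.out.println(Objects.toStringHelper("sketch")
            .add("elapsedMs", ms).add("hasNext", it.hasNext()).toString());
      }
    }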

diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java b/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
index 5d79355..3ab73c5 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmMetricsInfo.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hive.common;
 
-import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
 
 import org.apache.hadoop.metrics2.MetricsInfo;
 
@@ -58,7 +58,7 @@ public enum JvmMetricsInfo implements MetricsInfo {
   @Override public String description() { return desc; }
 
   @Override public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
         .add("name", name()).add("description", desc)
         .toString();
   }
diff --git a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
index fb7d7de..cf080e3 100644
--- a/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
+++ b/common/src/java/org/apache/hadoop/hive/common/JvmPauseMonitor.java
@@ -173,7 +173,7 @@ public class JvmPauseMonitor {
   private class Monitor implements Runnable {
     @Override
     public void run() {
-      Stopwatch sw = Stopwatch.createUnstarted();
+      Stopwatch sw = new Stopwatch();
       Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
       while (shouldRun) {
         sw.reset().start();
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index 24b3612..d4f6865 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -295,11 +295,6 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
           }
         }, new Predicate<Throwable>() {
           @Override
-          public boolean test(@Nullable Throwable input) {
-            return input instanceof IOException;
-          }
-
-          @Override
           public boolean apply(@Nullable Throwable input) {
             return input instanceof IOException;
           }
@@ -347,18 +342,6 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
       while (numRetries++ < maxTries && !setOfUrls.isEmpty()) {
         setOfUrls = ImmutableSet.copyOf(Sets.filter(setOfUrls, new Predicate<URL>() {
           @Override
-          public boolean test(URL input) {
-            try {
-            try {
-              String result = DruidStorageHandlerUtils.getURL(httpClient, input);
-              LOG.debug(String.format("Checking segment [%s] response is [%s]", input, result));
-              return Strings.isNullOrEmpty(result);
-            } catch (IOException e) {
-              LOG.error(String.format("Error while checking URL [%s]", input), e);
-              return true;
-            }
-          }
-
-          @Override
           public boolean apply(URL input) {
             try {
              String result = DruidStorageHandlerUtils.getURL(httpClient, input);
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
index dfcf889..8d099c7 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.druid.serde;
 
+import com.google.common.collect.Iterators;
 import com.metamx.common.lifecycle.Lifecycle;
 import com.metamx.http.client.HttpClient;
 import com.metamx.http.client.HttpClientConfig;
@@ -36,7 +37,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
@@ -63,8 +63,7 @@ public abstract class DruidQueryRecordReader<T extends BaseQuery<R>, R extends C
   /**
    * Query results.
    */
-
-  protected Iterator<R> results = Collections.emptyIterator();
+  protected Iterator<R> results = Iterators.emptyIterator();
 
   @Override
   public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
index b0888b0..8a41e91 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.druid.serde;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
@@ -28,6 +27,7 @@ import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 
 import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.collect.Iterators;
 
 import io.druid.query.Result;
 import io.druid.query.select.EventHolder;
@@ -42,7 +42,7 @@ public class DruidSelectQueryRecordReader
 
   private Result<SelectResultValue> current;
 
-  private Iterator<EventHolder> values = Collections.emptyIterator();
+  private Iterator<EventHolder> values = Iterators.emptyIterator();
 
   @Override
   protected SelectQuery createQuery(String content) throws IOException {
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
index 36aee8d..d431925 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.druid.serde;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
@@ -28,6 +27,7 @@ import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 
 import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.collect.Iterators;
 
 import io.druid.query.Result;
 import io.druid.query.topn.DimensionAndMetricValueExtractor;
@@ -42,7 +42,7 @@ public class DruidTopNQueryRecordReader
 
   private Result<TopNResultValue> current;
 
-  private Iterator<DimensionAndMetricValueExtractor> values = Collections.emptyIterator();
+  private Iterator<DimensionAndMetricValueExtractor> values = Iterators.emptyIterator();
 
   @Override
   protected TopNQuery createQuery(String content) throws IOException {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
index d7996f8..b9fc09b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java
@@ -80,14 +80,6 @@ public class TestHadoopFileStatus {
     Assert.assertTrue(sourceStatus.getAclEntries().size() == 3);
     Iterables.removeIf(sourceStatus.getAclEntries(), new Predicate<AclEntry>() {
       @Override
-      public boolean test(AclEntry input) {
-        if (input.getName() == null) {
-          return true;
-        }
-        return false;
-      }
-
-      @Override
       public boolean apply(AclEntry input) {
         if (input.getName() == null) {
           return true;
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
index 6266f30..b153679 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestServiceDiscovery.java
@@ -144,12 +144,6 @@ public class TestServiceDiscovery {
     }
 
     @Override
-    public boolean test(ConnParamInfo inputParam) {
-      return inputParam.host.equals(host) && inputParam.port == port &&
-              inputParam.path.startsWith(pathPrefix);
-    }
-
-    @Override
     public boolean apply(ConnParamInfo inputParam) {
       return inputParam.host.equals(host) && inputParam.port == port &&
         inputParam.path.startsWith(pathPrefix);
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
index 1d38104..a735346 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
@@ -153,7 +153,7 @@ public class CoreCliDriver extends CliAdapter {
 
   @Override
   public void runTest(String tname, String fname, String fpath) throws Exception {
-    Stopwatch sw = Stopwatch.createStarted();
+    Stopwatch sw = new Stopwatch().start();
     boolean skipped = false;
     boolean failed = false;
     try {
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
index 9011712..383fa8c 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java
@@ -80,15 +80,6 @@ public class SQLStdHiveAuthorizationValidatorForTest extends SQLStdHiveAuthoriza
     } else {
       return Lists.newArrayList(Iterables.filter(privilegeObjects,new Predicate<HivePrivilegeObject>() {
         @Override
-        public boolean test(@Nullable HivePrivilegeObject hivePrivilegeObject) {
-          // Return true to retain an item, and false to filter it out.
-          if (hivePrivilegeObject == null){
-            return true;
-          }
-          return !bypassObjectTypes.contains(hivePrivilegeObject.getType());
-        }
-
-        @Override
         public boolean apply(@Nullable HivePrivilegeObject hivePrivilegeObject) {
           // Return true to retain an item, and false to filter it out.
           if (hivePrivilegeObject == null){
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java b/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
index 74f50ba..061a918 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/util/ElapsedTimeLoggingWrapper.java
@@ -28,7 +28,7 @@ public abstract class ElapsedTimeLoggingWrapper<T> {
   public abstract T invokeInternal() throws Exception;
 
   public T invoke(String message, Logger LOG, boolean toStdErr) throws Exception {
-    Stopwatch sw = Stopwatch.createStarted();
+    Stopwatch sw = new Stopwatch().start();
     try {
       T retVal = invokeInternal();
       return retVal;
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
index e219692..ce75d72 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
@@ -131,7 +131,7 @@ public class LlapProtocolClientProxy extends AbstractService {
       public void onFailure(Throwable t) {
         LOG.warn("RequestManager shutdown with error", t);
       }
-    }, requestManagerExecutor);
+    });
   }
 
   @Override
@@ -263,7 +263,7 @@ public class LlapProtocolClientProxy extends AbstractService {
     void submitToExecutor(CallableRequest request, LlapNodeId nodeId) {
       ListenableFuture<SourceStateUpdatedResponseProto> future =
           executor.submit(request);
-      Futures.addCallback(future, new ResponseCallback(request.getCallback(), nodeId, this), executor);
+      Futures.addCallback(future, new ResponseCallback(request.getCallback(), nodeId, this));
     }
 
     @VisibleForTesting
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
index 09ed5b6..b4c62d5 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
@@ -176,7 +176,7 @@ public class AMReporter extends AbstractService {
           Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
         }
       }
-    }, queueLookupExecutor);
+    });
     // TODO: why is this needed? we could just save the host and port?
     nodeId = LlapNodeId.getInstance(localAddress.get().getHostName(), localAddress.get().getPort());
     LOG.info("AMReporter running with DaemonId: {}, NodeId: {}", daemonId, nodeId);
@@ -271,7 +271,7 @@ public class AMReporter extends AbstractService {
-        LOG.warn("Failed to send taskKilled for {}. The attempt will likely time out.",
             taskAttemptId);
       }
-    }, executor);
+    });
   }
 
   public void queryComplete(QueryIdentifier queryIdentifier) {
@@ -337,7 +337,7 @@ public class AMReporter extends AbstractService {
                     amNodeInfo.amNodeId, currentQueryIdentifier, t);
                   queryFailedHandler.queryFailed(currentQueryIdentifier);
                 }
-              }, executor);
+              });
             }
           }
         } catch (InterruptedException e) {
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index 9af5d28..3d59702 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -123,7 +123,7 @@ public class LlapTaskReporter implements TaskReporterInterface {
     currentCallable = new HeartbeatCallable(completionListener, task, umbilical, pollInterval, sendCounterInterval,
         maxEventsToGet, requestCounter, containerIdStr, initialEvent, fragmentRequestId);
     ListenableFuture<Boolean> future = heartbeatExecutor.submit(currentCallable);
-    Futures.addCallback(future, new HeartbeatCallback(errorReporter), heartbeatExecutor);
+    Futures.addCallback(future, new HeartbeatCallback(errorReporter));
   }
 
   /**
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 8bedba2..70447d9 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -168,7 +168,7 @@ public class TaskExecutorService extends AbstractService
     executionCompletionExecutorService = MoreExecutors.listeningDecorator(
         executionCompletionExecutorServiceRaw);
     ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
-    Futures.addCallback(future, new WaitQueueWorkerCallback(), waitQueueExecutorService);
+    Futures.addCallback(future, new WaitQueueWorkerCallback());
   }
 
   private Comparator<TaskWrapper> createComparator(
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 21e39dd..c3a74af 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -113,8 +113,8 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
   private final String queryId;
   private final HadoopShim tezHadoopShim;
   private boolean shouldRunTask = true;
-  final Stopwatch runtimeWatch = Stopwatch.createUnstarted();
-  final Stopwatch killtimerWatch = Stopwatch.createUnstarted();
+  final Stopwatch runtimeWatch = new Stopwatch();
+  final Stopwatch killtimerWatch = new Stopwatch();
   private final AtomicBoolean isStarted = new AtomicBoolean(false);
   private final AtomicBoolean isCompleted = new AtomicBoolean(false);
   private final AtomicBoolean killInvoked = new AtomicBoolean(false);
@@ -275,7 +275,7 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
         } finally {
           FileSystem.closeAllForUGI(fsTaskUgi);
           LOG.info("ExecutionTime for Container: " + 
request.getContainerIdString() + "=" +
-                  runtimeWatch.stop().elapsed(TimeUnit.MILLISECONDS));
+                  runtimeWatch.stop().elapsedMillis());
           if (LOG.isDebugEnabled()) {
             LOG.debug(
                 "canFinish post completion: " + taskSpec.getTaskAttemptID() + 
": " + canFinish());
@@ -501,14 +501,14 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
           LOG.info("Killed task {}", requestId);
           if (killtimerWatch.isRunning()) {
             killtimerWatch.stop();
-            long elapsed = killtimerWatch.elapsed(TimeUnit.MILLISECONDS);
+            long elapsed = killtimerWatch.elapsedMillis();
             LOG.info("Time to die for task {}", elapsed);
             if (metrics != null) {
               metrics.addMetricsPreemptionTimeToKill(elapsed);
             }
           }
           if (metrics != null) {
-            metrics.addMetricsPreemptionTimeLost(runtimeWatch.elapsed(TimeUnit.MILLISECONDS));
+            metrics.addMetricsPreemptionTimeLost(runtimeWatch.elapsedMillis());
             metrics.incrExecutorTotalKilled();
           }
           break;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
index fbd7fd2..427a0b1 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheInfo.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hive.llap.metrics;
 
-import com.google.common.base.MoreObjects;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
+import com.google.common.base.Objects;
+
 /**
  * Metrics information for llap cache.
  */
@@ -49,7 +50,7 @@ public enum LlapDaemonCacheInfo implements MetricsInfo {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
         .add("name", name()).add("description", desc)
         .toString();
   }
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
index 9695a4c..69d1c6f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonExecutorInfo.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hive.llap.metrics;
 
-import com.google.common.base.MoreObjects;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
+import com.google.common.base.Objects;
+
 /**
  * Metrics information for llap daemon container.
  */
@@ -73,7 +74,7 @@ public enum LlapDaemonExecutorInfo implements MetricsInfo {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
         .add("name", name()).add("description", desc)
         .toString();
   }
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
index ed6e2bf..f0fde62 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonIOInfo.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hive.llap.metrics;
 
-import com.google.common.base.MoreObjects;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
+import com.google.common.base.Objects;
+
 /**
  * Llap daemon I/O elevator metrics
  */
@@ -41,7 +42,7 @@ public enum LlapDaemonIOInfo implements MetricsInfo {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
         .add("name", name()).add("description", desc)
         .toString();
   }
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
index 1625cb2..efbddaa 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hive.llap.metrics;
 
-import com.google.common.base.MoreObjects;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
+import com.google.common.base.Objects;
+
 /**
  * Llap daemon JVM info. These are some additional metrics that are not exposed via
  * {@link org.apache.hadoop.metrics.jvm.JvmMetrics}
@@ -52,7 +53,7 @@ public enum LlapDaemonJvmInfo implements MetricsInfo {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
       .add("name", name()).add("description", desc)
       .toString();
   }
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
index 6a664d0..6bedccb 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -317,15 +317,15 @@ public class LlapTaskSchedulerService extends TaskScheduler {
       }, 10000L, TimeUnit.MILLISECONDS);
 
       nodeEnablerFuture = nodeEnabledExecutor.submit(nodeEnablerCallable);
-      Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG), nodeEnabledExecutor);
+      Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG));
 
       delayedTaskSchedulerFuture =
           delayedTaskSchedulerExecutor.submit(delayedTaskSchedulerCallable);
       Futures.addCallback(delayedTaskSchedulerFuture,
-          new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG), delayedTaskSchedulerExecutor);
+          new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG));
 
       schedulerFuture = schedulerExecutor.submit(schedulerCallable);
-      Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG), schedulerExecutor);
+      Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG));
 
       registry.start();
       registry.registerStateChangeListener(new NodeStateChangeListener());
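
Side note on the hunk above: Guava 27's Futures.addCallback requires an
explicit Executor argument, while Guava 14.0.1 offers the two-argument form
being restored here. A minimal sketch of the two-argument usage, illustrative
only and written against Guava 14.0.1, not taken from the patch:

    import java.util.concurrent.Executors;
    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;

    public class CallbackSketch {
      public static void main(String[] args) {
        ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<Integer> f = exec.submit(() -> 42);
        // Guava 27 would require: Futures.addCallback(f, callback, executor)
        Futures.addCallback(f, new FutureCallback<Integer>() {
          @Override public void onSuccess(Integer result) { System.out.println(result); }
          @Override public void onFailure(Throwable t) { t.printStackTrace(); }
        });
        exec.shutdown();
      }
    }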
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
index 7b1e3fe..c190be8 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/metrics/LlapTaskSchedulerInfo.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hive.llap.tezplugins.metrics;
 
-import com.google.common.base.MoreObjects;
 import org.apache.hadoop.metrics2.MetricsInfo;
 
+import com.google.common.base.Objects;
+
 /**
  * Metrics information for llap task scheduler.
  */
@@ -51,7 +52,7 @@ public enum LlapTaskSchedulerInfo implements MetricsInfo {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
+    return Objects.toStringHelper(this)
         .add("name", name()).add("description", desc)
         .toString();
   }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
index 76e4529..f66200f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java
@@ -61,8 +61,7 @@ public class TephraHBaseConnection extends VanillaHBaseConnection {
     if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) {
       LOG.debug("Using an in memory client transaction system for testing");
       TransactionManager txnMgr = new TransactionManager(conf);
-      txnMgr.startAsync();
-      txnMgr.awaitRunning();
+      txnMgr.startAndWait();
       txnClient = new InMemoryTxSystemClient(txnMgr);
     } else {
       // TODO should enable use of ZKDiscoveryService if users want it
diff --git a/pom.xml b/pom.xml
index d856845..1d66722 100644
--- a/pom.xml
+++ b/pom.xml
@@ -135,7 +135,7 @@
     <dropwizard.version>3.1.0</dropwizard.version>
     <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
     <druid.version>0.9.2</druid.version>
-    <guava.version>27.0-jre</guava.version>
+    <guava.version>14.0.1</guava.version>
     <groovy.version>2.4.4</groovy.version>
     <h2database.version>1.3.166</h2database.version>
     <hadoop.version>2.7.2</hadoop.version>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index abdc2ef..004bb2f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -106,7 +105,7 @@ public class FetchOperator implements Serializable {
 
   private transient Iterator<Path> iterPath;
   private transient Iterator<PartitionDesc> iterPartDesc;
-  private transient Iterator<FetchInputFormatSplit> iterSplits = Collections.emptyIterator();
+  private transient Iterator<FetchInputFormatSplit> iterSplits = Iterators.emptyIterator();
 
   private transient Path currPath;
   private transient PartitionDesc currDesc;
@@ -541,7 +540,7 @@ public class FetchOperator implements Serializable {
       this.currPath = null;
       this.iterPath = null;
       this.iterPartDesc = null;
-      this.iterSplits = Collections.emptyIterator();
+      this.iterSplits = Iterators.emptyIterator();
     } catch (Exception e) {
       throw new HiveException("Failed with exception " + e.getMessage()
           + StringUtils.stringifyException(e));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 48415a5..c1f6883 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -461,7 +461,7 @@ public class LineageLogger implements ExecuteWithHookContext {
    */
   private String getQueryHash(String queryStr) {
     Hasher hasher = Hashing.md5().newHasher();
-    hasher.putUnencodedChars(queryStr);
+    hasher.putString(queryStr);
     return hasher.hash().toString();
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 5a99a1a..84c0902 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -433,7 +433,7 @@ public class HiveMetaStoreChecker {
     ExecutorService executor;
     if (poolSize <= 1) {
       LOG.debug("Using single-threaded version of MSCK-GetPaths");
-      executor = MoreExecutors.newDirectExecutorService();
+      executor = MoreExecutors.sameThreadExecutor();
     } else {
       LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of 
threads " + poolSize);
       ThreadFactory threadFactory =
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
index 1dee3b5..93dcc0e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
@@ -2413,10 +2413,6 @@ public class SubstitutionVisitor {
   public static class FilterOnProjectRule extends RelOptRule {
     private static final Predicate<Filter> PREDICATE =
         new Predicate<Filter>() {
-          public boolean test(Filter input) {
-            return input.getCondition() instanceof RexInputRef;
-          }
-
           public boolean apply(Filter input) {
             return input.getCondition() instanceof RexInputRef;
           }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 3191d18..69e157e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 
 import java.util.ArrayList;
 import java.util.BitSet;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -533,7 +532,7 @@ public class HiveRelMdPredicates implements MetadataHandler<BuiltInMetadata.Pred
         public Iterator<Mapping> iterator() {
           ImmutableBitSet fields = exprFields.get(predicate.toString());
           if (fields.cardinality() == 0) {
-            return Collections.emptyIterator();
+            return Iterators.emptyIterator();
           }
           return new ExprsItr(fields);
         }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java
index 109e5ea..ee67443 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java
@@ -18,7 +18,13 @@
 package org.apache.hadoop.hive.ql.optimizer.physical;
 
 import java.io.Serializable;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,16 +73,16 @@ public class AnnotateRunTimeStatsOptimizer implements PhysicalPlanResolver {
 
       if (currTask instanceof MapRedTask) {
         MapRedTask mr = (MapRedTask) currTask;
-        ops.addAll((List<Operator<? extends OperatorDesc>>) mr.getWork().getAllOperators());
+        ops.addAll(mr.getWork().getAllOperators());
       } else if (currTask instanceof TezTask) {
         TezWork work = ((TezTask) currTask).getWork();
         for (BaseWork w : work.getAllWork()) {
-          ops.addAll((Set<Operator<? extends OperatorDesc>>) w.getAllOperators());
+          ops.addAll(w.getAllOperators());
         }
       } else if (currTask instanceof SparkTask) {
         SparkWork sparkWork = (SparkWork) currTask.getWork();
         for (BaseWork w : sparkWork.getAllWork()) {
-          ops.addAll((Set<Operator<? extends OperatorDesc>>) w.getAllOperators());
+          ops.addAll(w.getAllOperators());
         }
       }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 349d851..48362a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -250,16 +250,8 @@ public class ReplicationSpec {
   public Predicate<Partition> allowEventReplacementInto() {
     return new Predicate<Partition>() {
       @Override
-      public boolean test(@Nullable Partition partition) {
-        if (partition == null) {
-            return false;
-        }
-        return (allowEventReplacementInto(partition));
-      }
-
-      @Override
       public boolean apply(@Nullable Partition partition) {
-        if (partition == null) {
+        if (partition == null){
           return false;
         }
         return (allowEventReplacementInto(partition));
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index 89718d5..26c8812 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -134,11 +134,6 @@ public class SessionManager extends CompositeService {
       public Integer getValue() {
         Iterable<HiveSession> filtered = Iterables.filter(getSessions(), new Predicate<HiveSession>() {
           @Override
-          public boolean test(HiveSession hiveSession) {
-            return hiveSession.getNoOperationTime() == 0L;
-          }
-
-          @Override
           public boolean apply(HiveSession hiveSession) {
             return hiveSession.getNoOperationTime() == 0L;
           }
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index e1aaf5e..277738f 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -187,13 +187,6 @@ public class HdfsUtils {
   private static void removeBaseAclEntries(List<AclEntry> entries) {
     Iterables.removeIf(entries, new Predicate<AclEntry>() {
       @Override
-      public boolean test(AclEntry input) {
-        if (input.getName() == null) {
-          return true;
-        }
-        return false;
-      }
-      @Override
       public boolean apply(AclEntry input) {
         if (input.getName() == null) {
           return true;
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java b/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
index 3087a1c..0f03a64 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/MetricsCollection.java
@@ -101,10 +101,6 @@ public class MetricsCollection {
   public Metrics getTaskMetrics(final int jobId, final int stageId, final long taskId) {
     Predicate<TaskInfo> filter = new Predicate<TaskInfo>() {
       @Override
-      public boolean test(TaskInfo input) {
-        return jobId == input.jobId && stageId == input.stageId && taskId == 
input.taskId;
-      }
-      @Override
       public boolean apply(TaskInfo input) {
         return jobId == input.jobId && stageId == input.stageId && taskId == input.taskId;
       }
@@ -262,10 +258,6 @@ public class MetricsCollection {
       return jobId == input.jobId;
     }
 
-    @Override
-    public boolean test(TaskInfo input) {
-      return jobId == input.jobId;
-    }
   }
 
   private static class StageFilter implements Predicate<TaskInfo> {
@@ -283,10 +275,6 @@ public class MetricsCollection {
       return jobId == input.jobId && stageId == input.stageId;
     }
 
-    @Override
-    public boolean test(TaskInfo input) {
-      return jobId == input.jobId && stageId == input.stageId;
-    }
   }
 
 }
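
A recurring pattern in this revert: the deleted test(...) overrides on
com.google.common.base.Predicate implementations only compile against Guava 21
and later, where that interface extends java.util.function.Predicate; in Guava
14.0.1, apply(...) is the only method to implement. A minimal 14.0.1-compatible
sketch (illustrative only, not taken from the patch):

    import com.google.common.base.Predicate;

    public class PredicateSketch {
      public static void main(String[] args) {
        Predicate<String> nonEmpty = new Predicate<String>() {
          @Override
          public boolean apply(String input) {  // an @Override test(...) here would not compile on 14.0.1
            return input != null && !input.isEmpty();
          }
        };
        System.out.println(nonEmpty.apply("x")); // prints: true
      }
    }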
