This is an automated email from the ASF dual-hosted git repository.

jcamacho pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new f058a8c  HIVE-23979: Resolve spotbugs errors in JsonReporter.java, Metrics.java, and PerfLogger.java (Soumyakanti Das, reviewed by Jesus Camacho Rodriguez)
f058a8c is described below

commit f058a8c037de126c7c7400f0645d7987a5043753
Author: Soumyakanti Das <42816111+soumyakanti3...@users.noreply.github.com>
AuthorDate: Sat Aug 15 17:45:46 2020 -0400

    HIVE-23979: Resolve spotbugs errors in JsonReporter.java, Metrics.java, and PerfLogger.java (Soumyakanti Das, reviewed by Jesus Camacho Rodriguez)
    
    Closes apache/hive#1404
---
 .../org/apache/hadoop/hive/ql/log/PerfLogger.java  | 12 ++--
 .../java/org/apache/hadoop/hive/ql/Compiler.java   | 16 ++---
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java  |  4 +-
 .../apache/hadoop/hive/ql/DriverTxnHandler.java    |  8 +--
 .../java/org/apache/hadoop/hive/ql/Executor.java   | 13 ++--
 .../java/org/apache/hadoop/hive/ql/HookRunner.java |  5 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java       |  4 +-
 .../org/apache/hadoop/hive/ql/exec/MoveTask.java   |  5 +-
 .../hive/ql/exec/SerializationUtilities.java       | 16 ++---
 .../hive/ql/exec/SparkHashTableSinkOperator.java   |  4 +-
 .../org/apache/hadoop/hive/ql/exec/Utilities.java  | 36 +++++-----
 .../ql/exec/spark/SparkDynamicPartitionPruner.java |  4 +-
 .../hive/ql/exec/spark/SparkMapRecordHandler.java  |  4 +-
 .../hadoop/hive/ql/exec/spark/SparkPlan.java       |  8 +--
 .../hive/ql/exec/spark/SparkPlanGenerator.java     | 14 ++--
 .../ql/exec/spark/SparkReduceRecordHandler.java    |  4 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java       |  4 +-
 .../ql/exec/spark/status/LocalSparkJobMonitor.java |  8 +--
 .../exec/spark/status/RemoteSparkJobMonitor.java   |  8 +--
 .../hive/ql/exec/spark/status/RenderStrategy.java  |  6 +-
 .../hive/ql/exec/tez/MapRecordProcessor.java       |  4 +-
 .../hive/ql/exec/tez/MergeFileRecordProcessor.java |  4 +-
 .../hive/ql/exec/tez/ReduceRecordProcessor.java    |  4 +-
 .../hive/ql/exec/tez/ReduceRecordSource.java       |  4 +-
 .../hadoop/hive/ql/exec/tez/TezProcessor.java      |  8 +--
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java    | 16 ++---
 .../ql/exec/tez/monitoring/RenderStrategy.java     |  6 +-
 .../hive/ql/exec/tez/monitoring/TezJobMonitor.java |  8 +--
 .../hadoop/hive/ql/io/CombineHiveInputFormat.java  |  6 +-
 .../apache/hadoop/hive/ql/io/HiveInputFormat.java  |  4 +-
 .../org/apache/hadoop/hive/ql/metadata/Hive.java   | 76 +++++++++-----------
 .../ql/metadata/HiveMaterializedViewsRegistry.java |  4 +-
 .../apache/hadoop/hive/ql/optimizer/Transform.java |  6 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java     | 16 ++---
 .../hadoop/hive/ql/parse/CalcitePlanner.java       | 20 +++---
 .../apache/hadoop/hive/ql/parse/TezCompiler.java   | 80 +++++++++++-----------
 .../hadoop/hive/ql/parse/spark/SparkCompiler.java  | 12 ++--
 .../hive/ql/hooks/TestHiveProtoLoggingHook.java    |  4 +-
 .../metastore-common/spotbugs/spotbugs-exclude.xml |  3 -
 .../hadoop/hive/metastore/RetryingHMSHandler.java  |  4 +-
 .../hive/metastore/metrics/JsonReporter.java       |  3 +-
 .../hadoop/hive/metastore/metrics/Metrics.java     |  2 +-
 .../hadoop/hive/metastore/metrics/PerfLogger.java  |  8 +--
 standalone-metastore/spotbugs/spotbugs-exclude.xml |  3 -
 44 files changed, 228 insertions(+), 260 deletions(-)
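
The patch is a mechanical rename of the PerfLogger begin/end methods from
PascalCase to camelCase, which lets the NM_METHOD_NAMING_CONVENTION
suppressions and their spotbugs-exclude.xml entries be dropped. A minimal
sketch of the call pattern after the rename, assuming a hypothetical caller
(CLASS_NAME and the timed block are placeholders, not code from this commit):

    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.COMPILE);   // was PerfLogBegin
    try {
      // ... the work being timed ...
    } finally {
      // returns the elapsed duration, or -1 if no matching begin was recorded
      long durationMs = perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.COMPILE); // was PerfLogEnd
    }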

diff --git a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
index 478a2a5..90ebde5 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.common.metrics.common.MetricsScope;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hive.common.util.SuppressFBWarnings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -147,8 +146,7 @@ public class PerfLogger {
    * @param callerName the logging object to be used.
    * @param method method or ID that identifies this perf log element.
    */
-  @SuppressFBWarnings(value = "NM_METHOD_NAMING_CONVENTION", justification = "Intended")
-  public void PerfLogBegin(String callerName, String method) {
+  public void perfLogBegin(String callerName, String method) {
     long startTime = System.currentTimeMillis();
     startTimes.put(method, Long.valueOf(startTime));
     if (LOG.isDebugEnabled()) {
@@ -162,9 +160,8 @@ public class PerfLogger {
    * @param method
    * @return long duration  the difference between now and startTime, or -1 if startTime is null
    */
-  @SuppressFBWarnings(value = "NM_METHOD_NAMING_CONVENTION", justification = "Intended")
-  public long PerfLogEnd(String callerName, String method) {
-    return PerfLogEnd(callerName, method, null);
+  public long perfLogEnd(String callerName, String method) {
+    return perfLogEnd(callerName, method, null);
   }
 
   /**
@@ -173,8 +170,7 @@ public class PerfLogger {
    * @param method
    * @return long duration  the difference between now and startTime, or -1 if startTime is null
    */
-  @SuppressFBWarnings(value = "NM_METHOD_NAMING_CONVENTION", justification = "Intended")
-  public long PerfLogEnd(String callerName, String method, String additionalInfo) {
+  public long perfLogEnd(String callerName, String method, String additionalInfo) {
     Long startTime = startTimes.get(method);
     long endTime = System.currentTimeMillis();
     endTimes.put(method, Long.valueOf(endTime));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
index 84100e1..c51a146 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Compiler.java
@@ -125,7 +125,7 @@ public class Compiler {
   }
 
   private void initialize(String rawCommand) throws CommandProcessorException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
     driverState.compilingWithLocking();
 
     VariableSubstitution variableSubstitution = new VariableSubstitution(new HiveVariableSource() {
@@ -159,7 +159,7 @@ public class Compiler {
   }
 
   private void parse() throws ParseException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.PARSE);
 
     // Trigger query hook before compilation
     driverContext.getHookRunner().runBeforeParseHook(context.getCmd());
@@ -171,11 +171,11 @@ public class Compiler {
     } finally {
       driverContext.getHookRunner().runAfterParseHook(context.getCmd(), !success);
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.PARSE);
   }
 
   private BaseSemanticAnalyzer analyze() throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
 
     driverContext.getHookRunner().runBeforeCompileHook(context.getCmd());
 
@@ -234,7 +234,7 @@ public class Compiler {
     // validate the plan
     sem.validate();
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
 
     return sem;
   }
@@ -407,7 +407,7 @@ public class Compiler {
         HiveConf.getBoolVar(driverContext.getConf(), HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
 
       try {
-        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
+        perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
         // Authorization check for kill query will be in KillQueryImpl
         // As both admin or operation owner can perform the operation.
         // Which is not directly supported in authorizer
@@ -418,7 +418,7 @@ public class Compiler {
         CONSOLE.printError("Authorization failed:" + authExp.getMessage() + ". 
Use SHOW GRANT to get more details.");
         throw DriverUtils.createProcessorException(driverContext, 403, 
authExp.getMessage(), "42000", null);
       } finally {
-        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
+        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
       }
     }
   }
@@ -474,7 +474,7 @@ public class Compiler {
       }
     }
 
-    double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
+    double duration = perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
     ImmutableMap<String, Long> compileHMSTimings = Hive.dumpMetaCallTimingWithoutEx("compilation");
     driverContext.getQueryDisplay().setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 5a0cb0d..4d2d869 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -394,12 +394,12 @@ public class Driver implements IDriver {
     }
 
     PerfLogger perfLogger = SessionState.getPerfLogger(true);
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.WAIT_COMPILE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.WAIT_COMPILE);
 
    try (CompileLock compileLock = CompileLockFactory.newInstance(driverContext.getConf(), command)) {
       boolean success = compileLock.tryAcquire();
 
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.WAIT_COMPILE);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.WAIT_COMPILE);
 
       if (metrics != null) {
         metrics.decrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java
index 6de6fd3..00fb75d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java
@@ -214,7 +214,7 @@ class DriverTxnHandler {
    */
   private void acquireLocks() throws CommandProcessorException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
 
     if (!driverContext.getTxnManager().isTxnOpen() && driverContext.getTxnManager().supportsAcid()) {
       /* non acid txn managers don't support txns but fwd lock requests to lock managers
@@ -245,7 +245,7 @@ class DriverTxnHandler {
       throw DriverUtils.createProcessorException(driverContext, 10, errorMessage, ErrorMsg.findSQLState(e.getMessage()),
           e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
     }
   }
 
@@ -614,7 +614,7 @@ class DriverTxnHandler {
 
   void endTransactionAndCleanup(boolean commit, HiveTxnManager txnManager) throws LockException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
     
     // If we've opened a transaction we need to commit or rollback rather than explicitly releasing the locks.
     driverContext.getConf().unset(ValidTxnList.VALID_TXNS_KEY);
@@ -633,7 +633,7 @@ class DriverTxnHandler {
       context.setHiveLocks(null);
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
   }
 
   private void commitOrRollback(boolean commit, HiveTxnManager txnManager) throws LockException {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java
index 62dc604..3aba618 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache.CacheEntry;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
-import org.apache.hadoop.hive.ql.exec.DagUtils;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -85,7 +84,7 @@ public class Executor {
   }
 
   public void execute() throws CommandProcessorException {
-    SessionState.getPerfLogger().PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
+    SessionState.getPerfLogger().perfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
 
     boolean noName = Strings.isNullOrEmpty(driverContext.getConf().get(MRJobConfig.JOB_NAME));
 
@@ -236,7 +235,7 @@ public class Executor {
   }
 
   private void runTasks(boolean noName) throws Exception {
-    SessionState.getPerfLogger().PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS);
+    SessionState.getPerfLogger().perfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS);
 
     int jobCount = getJobCount();
     String jobName = getJobName();
@@ -247,7 +246,7 @@ public class Executor {
       handleFinished();
     }
 
-    SessionState.getPerfLogger().PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS);
+    SessionState.getPerfLogger().perfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS);
   }
 
   private void handleFinished() throws Exception {
@@ -440,7 +439,7 @@ public class Executor {
     } else if (driverContext.getCacheUsage().getStatus() == CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS &&
         driverContext.getCacheUsage().getCacheEntry() != null && driverContext.getPlan().getFetchTask() != null) {
       // Save results to the cache for future queries to use.
-      SessionState.getPerfLogger().PerfLogBegin(CLASS_NAME, PerfLogger.SAVE_TO_RESULTS_CACHE);
+      SessionState.getPerfLogger().perfLogBegin(CLASS_NAME, PerfLogger.SAVE_TO_RESULTS_CACHE);
 
       CacheEntry cacheEntry = driverContext.getCacheUsage().getCacheEntry();
       boolean savedToCache = QueryResultsCache.getInstance().setEntryValid(cacheEntry,
@@ -452,7 +451,7 @@ public class Executor {
         driverContext.setUsedCacheEntry(driverContext.getCacheUsage().getCacheEntry());
       }
 
-      SessionState.getPerfLogger().PerfLogEnd(CLASS_NAME, PerfLogger.SAVE_TO_RESULTS_CACHE);
+      SessionState.getPerfLogger().perfLogEnd(CLASS_NAME, PerfLogger.SAVE_TO_RESULTS_CACHE);
     }
   }
 
@@ -539,7 +538,7 @@ public class Executor {
     if (noName) {
       driverContext.getConf().set(MRJobConfig.JOB_NAME, "");
     }
-    double duration = SessionState.getPerfLogger().PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE) / 1000.00;
+    double duration = SessionState.getPerfLogger().perfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE) / 1000.00;
 
     ImmutableMap<String, Long> executionHMSTimings = Hive.dumpMetaCallTimingWithoutEx("execution");
     driverContext.getQueryDisplay().setHmsTimings(QueryDisplay.Phase.EXECUTION, executionHMSTimings);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
index 2ba170b..da1254a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql;
 
-import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -292,9 +291,9 @@ public class HookRunner {
       PerfLogger perfLogger = SessionState.getPerfLogger();
 
       for (ExecuteWithHookContext hook : hooks) {
-        perfLogger.PerfLogBegin(CLASS_NAME, prefix + hook.getClass().getName());
+        perfLogger.perfLogBegin(CLASS_NAME, prefix + hook.getClass().getName());
         hook.run(hookContext);
-        perfLogger.PerfLogEnd(CLASS_NAME, prefix + hook.getClass().getName());
+        perfLogger.perfLogEnd(CLASS_NAME, prefix + hook.getClass().getName());
       }
     } catch (HiveException e) {
       throw e;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 489d09f..81501cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -380,7 +380,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
   // Core logic to load hash table using HashTableLoader
   private Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> loadHashTableInternal(
           ExecMapperContext mapContext, MapredContext mrContext) throws HiveException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);
     loader.init(mapContext, mrContext, hconf, this);
     try {
       loader.load(mapJoinTables, mapJoinTableSerdes);
@@ -399,7 +399,7 @@ public class MapJoinOperator extends AbstractMapJoinOperator<MapJoinDesc> implem
     Pair<MapJoinTableContainer[], MapJoinTableContainerSerDe[]> pair =
             new ImmutablePair<> (mapJoinTables, mapJoinTableSerdes);
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.LOAD_HASHTABLE);
 
     if (canSkipJoinProcessing(mapContext)) {
       LOG.info("Skipping big table join processing for " + this.toString());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index bf5a711..c21c0f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -91,7 +90,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
       throws HiveException {
     try {
       PerfLogger perfLogger = SessionState.getPerfLogger();
-      perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
 
       String mesg = "Moving data to " + (isDfsDir ? "" : "local ") + 
"directory "
           + targetPath.toString();
@@ -107,7 +106,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
         moveFileFromDfsToLocal(sourcePath, targetPath, fs, dstFs);
       }
 
-      perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
     } catch (Exception e) {
       throw new HiveException("Unable to move source " + sourcePath + " to 
destination "
           + targetPath, e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
index 6422e92..4fb89fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
@@ -613,14 +613,14 @@ public class SerializationUtilities {
 
   private static void serializePlan(Kryo kryo, Object plan, OutputStream out, boolean cloningPlan) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
     LOG.info("Serializing " + plan.getClass().getSimpleName() + " using kryo");
     if (cloningPlan) {
       serializeObjectByKryo(kryo, plan, out);
     } else {
       serializeObjectByKryo(kryo, plan, out);
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
   }
 
   /**
@@ -652,7 +652,7 @@ public class SerializationUtilities {
   private static <T> T deserializePlan(Kryo kryo, InputStream in, Class<T> planClass,
       boolean cloningPlan) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
     T plan;
     LOG.info("Deserializing " + planClass.getSimpleName() + " using kryo");
     if (cloningPlan) {
@@ -660,7 +660,7 @@ public class SerializationUtilities {
     } else {
       plan = deserializeObjectByKryo(kryo, in, planClass);
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
     return plan;
   }
 
@@ -672,7 +672,7 @@ public class SerializationUtilities {
   public static MapredWork clonePlan(MapredWork plan) {
     // TODO: need proper clone. Meanwhile, let's at least keep this horror in one place
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
     Operator<?> op = plan.getAnyOperator();
     CompilationOpContext ctx = (op == null) ? null : op.getCompilationOpContext();
     ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
@@ -683,7 +683,7 @@ public class SerializationUtilities {
     for (Operator<?> newOp : newPlan.getAllOperators()) {
       newOp.setCompilationOpContext(ctx);
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
     return newPlan;
   }
 
@@ -723,7 +723,7 @@ public class SerializationUtilities {
    */
   public static BaseWork cloneBaseWork(BaseWork plan) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN);
     Operator<?> op = plan.getAnyRootOperator();
     CompilationOpContext ctx = (op == null) ? null : op.getCompilationOpContext();
     ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
@@ -734,7 +734,7 @@ public class SerializationUtilities {
     for (Operator<?> newOp : newPlan.getAllOperators()) {
       newOp.setCompilationOpContext(ctx);
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN);
     return newPlan;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
index 10144a1..0066739 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SparkHashTableSinkOperator.java
@@ -100,11 +100,11 @@ public class SparkHashTableSinkOperator
         }
       } else {
         String method = PerfLogger.SPARK_FLUSH_HASHTABLE + getName();
-        perfLogger.PerfLogBegin(CLASS_NAME, method);
+        perfLogger.perfLogBegin(CLASS_NAME, method);
         try {
           flushToFile(mapJoinTables[tag], tag);
         } finally {
-          perfLogger.PerfLogEnd(CLASS_NAME, method);
+          perfLogger.perfLogEnd(CLASS_NAME, method);
         }
       }
       super.closeOp(abort);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index de31265..ea73cba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -76,7 +76,6 @@ import java.util.zip.Deflater;
 import java.util.zip.DeflaterOutputStream;
 import java.util.zip.InflaterInputStream;
 
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.StringEscapeUtils;
@@ -112,7 +111,6 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.DriverState;
-import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
@@ -161,8 +159,6 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.api.Adjacency;
-import org.apache.hadoop.hive.ql.plan.api.Graph;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsFactory;
 import org.apache.hadoop.hive.ql.stats.StatsPublisher;
@@ -1453,9 +1449,9 @@ public final class Utilities {
         Path tmpPathOriginal = tmpPath;
         tmpPath = new Path(tmpPath.getParent(), tmpPath.getName() + ".moved");
         LOG.debug("shouldAvoidRename is false therefore moving/renaming " + 
tmpPathOriginal + " to " + tmpPath);
-        perfLogger.PerfLogBegin("FileSinkOperator", "rename");
+        perfLogger.perfLogBegin("FileSinkOperator", "rename");
         Utilities.rename(fs, tmpPathOriginal, tmpPath);
-        perfLogger.PerfLogEnd("FileSinkOperator", "rename");
+        perfLogger.perfLogEnd("FileSinkOperator", "rename");
       }
 
       // Remove duplicates from tmpPath
@@ -1464,21 +1460,21 @@ public final class Utilities {
       FileStatus[] statuses = statusList.toArray(new FileStatus[statusList.size()]);
       if(statuses != null && statuses.length > 0) {
         Set<FileStatus> filesKept = new HashSet<>();
-        perfLogger.PerfLogBegin("FileSinkOperator", 
"RemoveTempOrDuplicateFiles");
+        perfLogger.perfLogBegin("FileSinkOperator", 
"RemoveTempOrDuplicateFiles");
         // remove any tmp file or double-committed output files
         List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
             fs, statuses, dpCtx, conf, hconf, filesKept, false);
-        perfLogger.PerfLogEnd("FileSinkOperator", 
"RemoveTempOrDuplicateFiles");
+        perfLogger.perfLogEnd("FileSinkOperator", 
"RemoveTempOrDuplicateFiles");
         // create empty buckets if necessary
         if (!emptyBuckets.isEmpty()) {
-          perfLogger.PerfLogBegin("FileSinkOperator", "CreateEmptyBuckets");
+          perfLogger.perfLogBegin("FileSinkOperator", "CreateEmptyBuckets");
           createEmptyBuckets(
               hconf, emptyBuckets, conf.getCompressed(), conf.getTableInfo(), reporter);
           for(Path p:emptyBuckets) {
             FileStatus[] items = fs.listStatus(p);
             filesKept.addAll(Arrays.asList(items));
           }
-          perfLogger.PerfLogEnd("FileSinkOperator", "CreateEmptyBuckets");
+          perfLogger.perfLogEnd("FileSinkOperator", "CreateEmptyBuckets");
         }
 
         // move to the file destination
@@ -1489,16 +1485,16 @@ public final class Utilities {
           conf.getFilesToFetch().addAll(filesKept);
         } else if (conf !=null && conf.isCTASorCM() && isBlobStorage) {
           // for CTAS or Create MV statements
-          perfLogger.PerfLogBegin("FileSinkOperator", 
"moveSpecifiedFileStatus");
+          perfLogger.perfLogBegin("FileSinkOperator", 
"moveSpecifiedFileStatus");
           LOG.debug("CTAS/Create MV: Files being renamed:  " + 
filesKept.toString());
           moveSpecifiedFilesInParallel(hconf, fs, tmpPath, specPath, 
filesKept);
-          perfLogger.PerfLogEnd("FileSinkOperator", "moveSpecifiedFileStatus");
+          perfLogger.perfLogEnd("FileSinkOperator", "moveSpecifiedFileStatus");
         } else {
           // for rest of the statement e.g. INSERT, LOAD etc
-          perfLogger.PerfLogBegin("FileSinkOperator", "RenameOrMoveFiles");
+          perfLogger.perfLogBegin("FileSinkOperator", "RenameOrMoveFiles");
           LOG.debug("Final renaming/moving. Source: " + tmpPath + " 
.Destination: " + specPath);
           renameOrMoveFilesInParallel(hconf, fs, tmpPath, specPath);
-          perfLogger.PerfLogEnd("FileSinkOperator", "RenameOrMoveFiles");
+          perfLogger.perfLogEnd("FileSinkOperator", "RenameOrMoveFiles");
         }
       }
     } else {
@@ -1528,7 +1524,7 @@ public final class Utilities {
     LOG.info("rename {} files from {} to dest {}",
         filesToMove.size(), srcPath, destPath);
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin("FileSinkOperator", "moveSpecifiedFileStatus");
+    perfLogger.perfLogBegin("FileSinkOperator", "moveSpecifiedFileStatus");
 
     final ExecutorService pool = createMoveThreadPool(conf);
 
@@ -1538,7 +1534,7 @@ public final class Utilities {
     shutdownAndCleanup(pool, futures);
     LOG.info("Completed rename from {} to {}", srcPath, destPath);
 
-    perfLogger.PerfLogEnd("FileSinkOperator", "moveSpecifiedFileStatus");
+    perfLogger.perfLogEnd("FileSinkOperator", "moveSpecifiedFileStatus");
   }
 
   /**
@@ -2542,7 +2538,7 @@ public final class Utilities {
   public static ContentSummary getInputSummary(final Context ctx, MapWork work, PathFilter filter)
       throws IOException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.INPUT_SUMMARY);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.INPUT_SUMMARY);
 
     final long[] summary = {0L, 0L, 0L};
     final Set<Path> pathNeedProcess = new HashSet<>();
@@ -2584,7 +2580,7 @@ public final class Utilities {
       }
       getInputSummaryWithPool(ctx, Collections.unmodifiableSet(pathNeedProcess),
           work, summary, executor);
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.INPUT_SUMMARY);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.INPUT_SUMMARY);
     }
     return new ContentSummary.Builder().length(summary[0])
         .fileCount(summary[1]).directoryCount(summary[2]).build();
@@ -3463,7 +3459,7 @@ public final class Utilities {
       Context ctx, boolean skipDummy) throws Exception {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.INPUT_PATHS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.INPUT_PATHS);
 
     Set<Path> pathsProcessed = new HashSet<Path>();
     List<Path> pathsToAdd = new LinkedList<Path>();
@@ -3552,7 +3548,7 @@ public final class Utilities {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.INPUT_PATHS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.INPUT_PATHS);
 
     return finalPathsToAdd;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java
index b9285ac..1d0b044 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java
@@ -79,11 +79,11 @@ public class SparkDynamicPartitionPruner {
       // Nothing to prune for this MapWork
       return;
     }
-    perfLogger.PerfLogBegin(CLASS_NAME,
+    perfLogger.perfLogBegin(CLASS_NAME,
             PerfLogger.SPARK_DYNAMICALLY_PRUNE_PARTITIONS + work.getName());
     processFiles(work, jobConf);
     prunePartitions(work);
-    perfLogger.PerfLogBegin(CLASS_NAME,
+    perfLogger.perfLogBegin(CLASS_NAME,
             PerfLogger.SPARK_DYNAMICALLY_PRUNE_PARTITIONS + work.getName());
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
index 530131f..671fdd2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java
@@ -64,7 +64,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
 
   @Override
   public <K, V> void init(JobConf job, OutputCollector<K, V> output, Reporter reporter) throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
     super.init(job, output, reporter);
 
     try {
@@ -124,7 +124,7 @@ public class SparkMapRecordHandler extends SparkRecordHandler {
         throw new RuntimeException("Map operator initialization failed: " + e, e);
       }
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
   }
 
   @Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlan.java
index 8cab3ef..4c14ada 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlan.java
@@ -68,7 +68,7 @@ public class SparkPlan {
 
   @SuppressWarnings("unchecked")
   public JavaPairRDD<HiveKey, BytesWritable> generateGraph() {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_BUILD_RDD_GRAPH);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_BUILD_RDD_GRAPH);
     Map<SparkTran, JavaPairRDD<HiveKey, BytesWritable>> tranToOutputRDDMap
         = new HashMap<SparkTran, JavaPairRDD<HiveKey, BytesWritable>>();
     for (SparkTran tran : getAllTrans()) {
@@ -112,7 +112,7 @@ public class SparkPlan {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_BUILD_RDD_GRAPH);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_BUILD_RDD_GRAPH);
 
     LOG.info("\n\nSpark RDD Graph:\n\n" + finalRDD.toDebugString() + "\n");
 
@@ -133,7 +133,7 @@ public class SparkPlan {
   private String getLongFormCallSite(SparkTran tran) {
     if (this.jobConf.getBoolean(HiveConf.ConfVars.HIVE_SPARK_LOG_EXPLAIN_WEBUI.varname, HiveConf
             .ConfVars.HIVE_SPARK_LOG_EXPLAIN_WEBUI.defaultBoolVal)) {
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_CREATE_EXPLAIN_PLAN + tran.getName());
+      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_CREATE_EXPLAIN_PLAN + tran.getName());
 
       ExplainWork explainWork = new ExplainWork();
       explainWork.setConfig(new ExplainConfiguration());
@@ -153,7 +153,7 @@ public class SparkPlan {
         LOG.error("Error while generating explain plan for " + tran.getName(), 
e);
       }
 
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_CREATE_EXPLAIN_PLAN + tran.getName());
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_CREATE_EXPLAIN_PLAN + tran.getName());
       return explainOutput;
     }
     return "";
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
index 806deb5..a6a40cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkPlanGenerator.java
@@ -18,19 +18,13 @@
 
 package org.apache.hadoop.hive.ql.exec.spark;
 
-import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.net.MalformedURLException;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-import org.apache.hive.spark.client.SparkClientUtilities;
-import org.apache.spark.SparkConf;
 import org.apache.spark.util.CallSite;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -114,7 +108,7 @@ public class SparkPlanGenerator {
   }
 
   public SparkPlan generate(SparkWork sparkWork) throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_BUILD_PLAN);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_BUILD_PLAN);
     SparkPlan sparkPlan = new SparkPlan(this.jobConf, this.sc.sc());
     cloneToWork = sparkWork.getCloneToWork();
     workToTranMap.clear();
@@ -125,13 +119,13 @@ public class SparkPlanGenerator {
         // Run the SparkDynamicPartitionPruner, we run this here instead of inside the
         // InputFormat so that we don't have to run pruning when creating a Record Reader
         runDynamicPartitionPruner(work);
-        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_CREATE_TRAN + work.getName());
+        perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_CREATE_TRAN + work.getName());
         SparkTran tran = generate(work, sparkWork);
         SparkTran parentTran = generateParentTran(sparkPlan, sparkWork, work);
         sparkPlan.addTran(tran);
         sparkPlan.connect(parentTran, tran);
         workToTranMap.put(work, tran);
-        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_CREATE_TRAN + work.getName());
+        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_CREATE_TRAN + work.getName());
       }
     } finally {
       // clear all ThreadLocal cached MapWork/ReduceWork after plan generation
@@ -139,7 +133,7 @@ public class SparkPlanGenerator {
       Utilities.clearWorkMap(jobConf);
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_BUILD_PLAN);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_BUILD_PLAN);
     return sparkPlan;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 454196f..1ce16d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -115,7 +115,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
   @Override
   @SuppressWarnings("unchecked")
   public void init(JobConf job, OutputCollector output, Reporter reporter) throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
     super.init(job, output, reporter);
 
     rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
@@ -258,7 +258,7 @@ public class SparkReduceRecordHandler extends SparkRecordHandler {
         throw new RuntimeException("Reduce operator initialization failed", e);
       }
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS);
   }
 
   /**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index f401b4d..727f225 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -115,10 +115,10 @@ public class SparkTask extends Task<SparkWork> {
       sparkWork.setRequiredCounterPrefix(getOperatorCounters());
 
       // Submit the Spark job
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
+      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
       submitTime = perfLogger.getStartTime(PerfLogger.SPARK_SUBMIT_JOB);
       jobRef = sparkSession.submit(taskQueue, context, sparkWork);
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_JOB);
 
       // If the driver context has been shutdown (due to query cancellation) kill the Spark job
       if (taskQueue.isShutdown()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java
index 911699d..ed7d19e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java
@@ -44,8 +44,8 @@ public class LocalSparkJobMonitor extends SparkJobMonitor {
     JobExecutionStatus lastState = null;
     Map<SparkStage, SparkStageProgress> lastProgressMap = null;
 
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
 
     startTime = System.currentTimeMillis();
 
@@ -73,7 +73,7 @@ public class LocalSparkJobMonitor extends SparkJobMonitor {
           switch (state) {
           case RUNNING:
             if (!running) {
-              perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
+              perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
               // print job stages.
               console.printInfo("\nQuery Hive on Spark job["
                 + sparkJobStatus.getJobId() + "] stages:");
@@ -136,7 +136,7 @@ public class LocalSparkJobMonitor extends SparkJobMonitor {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
     return rc;
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
index 35d20e3..f58198f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
@@ -58,8 +58,8 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
     int rc = 0;
     Map<SparkStage, SparkStageProgress> lastProgressMap = null;
 
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
 
     startTime = System.currentTimeMillis();
     JobHandle.State state = null;
@@ -90,7 +90,7 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
           if (sparkJobState == JobExecutionStatus.RUNNING) {
             Map<SparkStage, SparkStageProgress> progressMap = sparkJobStatus.getSparkStageProgress();
             if (!running) {
-              perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
+              perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_SUBMIT_TO_RUNNING);
               printAppInfo();
               console.printInfo("Hive on Spark Session Web UI URL: " + 
sparkJobStatus.getWebUIURL());
               // print job stages.
@@ -187,7 +187,7 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_JOB);
     return rc;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RenderStrategy.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RenderStrategy.java
index 67a3a9c..ec5fb50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RenderStrategy.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RenderStrategy.java
@@ -88,14 +88,14 @@ class RenderStrategy {
             completed.add(s);
 
             if (!perfLogger.startTimeHasMethod(PerfLogger.SPARK_RUN_STAGE + s)) {
-              perfLogger.PerfLogBegin(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
+              perfLogger.perfLogBegin(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
             }
-            perfLogger.PerfLogEnd(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
+            perfLogger.perfLogEnd(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
           }
           if (complete < total && (complete > 0 || running > 0 || failed > 0)) {
             /* stage is started, but not complete */
             if (!perfLogger.startTimeHasMethod(PerfLogger.SPARK_RUN_STAGE + s)) {
-              perfLogger.PerfLogBegin(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
+              perfLogger.perfLogBegin(SparkJobMonitor.CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
             }
             if (failed > 0) {
               reportBuffer.append(
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index 5cfa759..a1593cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -121,7 +121,7 @@ public class MapRecordProcessor extends RecordProcessor {
   @Override
   void init(MRTaskReporter mrReporter,
       Map<String, LogicalInput> inputs, Map<String, LogicalOutput> outputs) throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
     super.init(mrReporter, inputs, outputs);
     checkAbortCondition();
 
@@ -351,7 +351,7 @@ public class MapRecordProcessor extends RecordProcessor {
         throw new RuntimeException("Map operator initialization failed", e);
       }
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
   }
 
   private void initializeMapRecordSources() throws Exception {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
index 13f5f12..5ad5bc8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java
@@ -74,7 +74,7 @@ public class MergeFileRecordProcessor extends RecordProcessor {
       MRTaskReporter mrReporter, Map<String, LogicalInput> inputs,
       Map<String, LogicalOutput> outputs) throws Exception {
     // TODO HIVE-14042. Abort handling.
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
     super.init(mrReporter, inputs, outputs);
     execContext = new ExecMapperContext(jconf);
 
@@ -142,7 +142,7 @@ public class MergeFileRecordProcessor extends RecordProcessor {
         throw new RuntimeException("Map operator initialization failed", e);
       }
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
   }
 
   @Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
index 03edbf7..39c098b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
@@ -101,7 +101,7 @@ public class ReduceRecordProcessor extends RecordProcessor {
   @Override
   void init(MRTaskReporter mrReporter, Map<String, LogicalInput> inputs, Map<String, LogicalOutput> outputs)
       throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
     super.init(mrReporter, inputs, outputs);
 
     MapredContext.init(false, new JobConf(jconf));
@@ -241,7 +241,7 @@ public class ReduceRecordProcessor extends RecordProcessor {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
   }
 
   private void initializeMultipleSources(ReduceWork redWork, int numTags, ObjectInspector[] ois,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
index 2dfa61b..555ecc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java
@@ -34,8 +34,6 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorDeserializeRow;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
@@ -242,7 +240,7 @@ public class ReduceRecordSource implements RecordSource {
         throw new RuntimeException("Reduce operator initialization failed", e);
       }
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS);
   }
 
   public TableDesc getKeyTableDesc() {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
index fa6160f..c026dce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java
@@ -172,7 +172,7 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
 
   @Override
   public void initialize() throws IOException {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR);
     Configuration conf = TezUtils.createConfFromUserPayload(getContext().getUserPayload());
     this.jobConf = new JobConf(conf);
     this.processorContext = getContext();
@@ -181,7 +181,7 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
       ((Hook)execCtx).initializeHook(this);
     }
     setupMRLegacyConfigs(processorContext);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR);
   }
 
   private void setupMRLegacyConfigs(ProcessorContext processorContext) {
@@ -216,7 +216,7 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
       return;
     }
 
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
     // in case of broadcast-join read the broadcast edge inputs
     // (possibly asynchronously)
 
@@ -267,7 +267,7 @@ public class TezProcessor extends AbstractLogicalIOProcessor {
       rproc.run();
 
       //done - output does not need to be committed as hive does not use outputcommitter
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_PROCESSOR);
     } catch (Throwable t) {
       originalThrowable = t;
     } finally {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index b086fc0..323906c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -188,10 +188,10 @@ public class TezTask extends Task<TezWork> {
       CallerContext callerContext = CallerContext.create(
           "HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", 
queryPlan.getQueryStr());
 
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
+      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
       session = sessionRef.value = WorkloadManagerFederation.getSession(
           sessionRef.value, conf, mi, getWork().getLlapMode(), wmContext);
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
 
       try {
         ss.setTezSession(session);
@@ -406,7 +406,7 @@ public class TezTask extends Task<TezWork> {
   DAG build(JobConf conf, TezWork tezWork, Path scratchDir, Context ctx,
       Map<String, LocalResource> vertexResources) throws Exception {
 
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
 
     // getAllWork returns a topologically sorted list, which we use to make
     // sure that vertices are created before they are used in edges.
@@ -438,7 +438,7 @@ public class TezTask extends Task<TezWork> {
     for (BaseWork workUnit: topologicalWorkList) {
 
       // translate work to vertex
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + workUnit.getName());
+      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + workUnit.getName());
 
       if (workUnit instanceof UnionWork) {
         // Special case for unions. These items translate to VertexGroups
@@ -505,7 +505,7 @@ public class TezTask extends Task<TezWork> {
         } // Otherwise just leave it up to Tez to decide how much memory to allocate
         dag.addVertex(wx);
         utils.addCredentials(workUnit, dag);
-        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + workUnit.getName());
+        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + workUnit.getName());
         workToVertex.put(workUnit, wx);
         workToConf.put(workUnit, wxConf);
 
@@ -522,7 +522,7 @@ public class TezTask extends Task<TezWork> {
     }
     // Clear the work map after build. TODO: remove caching instead?
     Utilities.clearWorkMap(conf);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
     return dag;
   }
 
@@ -564,7 +564,7 @@ public class TezTask extends Task<TezWork> {
   }
 
   DAGClient submit(DAG dag, Ref<TezSessionState> sessionStateRef) throws Exception {
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG);
     DAGClient dagClient = null;
     TezSessionState sessionState = sessionStateRef.value;
     try {
@@ -598,7 +598,7 @@ public class TezTask extends Task<TezWork> {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG);
     return new SyncDagClient(dagClient);
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
index 7cf8ed9..b029683 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/RenderStrategy.java
@@ -93,17 +93,17 @@ class RenderStrategy {
           * We may have missed the start of the vertex due to the 3 seconds interval
            */
             if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) {
-              perfLogger.PerfLogBegin(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
+              perfLogger.perfLogBegin(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
             }
 
             if (!perfLogger.endTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) {
-              perfLogger.PerfLogEnd(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
+              perfLogger.perfLogEnd(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
             }
           }
           if (complete < total && (complete > 0 || running > 0 || failed > 0)) {
 
             if (!perfLogger.startTimeHasMethod(PerfLogger.TEZ_RUN_VERTEX + s)) {
-              perfLogger.PerfLogBegin(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
+              perfLogger.perfLogBegin(TezJobMonitor.CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s);
             }
 
           /* vertex is started, but not complete */
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
index 9729a7b..bfff11a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
@@ -155,8 +155,8 @@ public class TezJobMonitor {
     synchronized (shutdownList) {
       shutdownList.add(dagClient);
     }
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
     DAGStatus.State lastState = null;
     boolean running = false;
 
@@ -218,7 +218,7 @@ public class TezJobMonitor {
               break;
             case RUNNING:
               if (!running) {
-                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
+                perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
                 console.printInfo("Status: Running (" + dagClient.getExecutionContext() + ")\n");
                 this.executionStartTime = System.currentTimeMillis();
                 running = true;
@@ -308,7 +308,7 @@ public class TezJobMonitor {
       }
     }
 
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
     printSummary(success, vertexProgressMap);
     return rc;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index 1f72477..3997217 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -504,7 +504,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
   @Override
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
     init(job);
 
     ArrayList<InputSplit> result = new ArrayList<InputSplit>();
@@ -532,7 +532,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
         }
       } catch (Exception e) {
         LOG.error("Error checking non-combinable path", e);
-        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
+        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
         throw new IOException(e);
       }
     }
@@ -585,7 +585,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
     }
 
     LOG.info("Number of all splits " + result.size());
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
     return result.toArray(new InputSplit[result.size()]);
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 3a448cf..f564ed7 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -756,7 +756,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
   @Override
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.GET_SPLITS);
     init(job);
     Path[] dirs = getInputPaths(job);
     JobConf newjob = new JobConf(job);
@@ -853,7 +853,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
     if (LOG.isInfoEnabled()) {
       LOG.info("number of splits " + result.size());
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS);
     return result.toArray(new HiveInputSplit[result.size()]);
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 062d161..054c55c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -28,7 +28,6 @@ import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName;
@@ -71,13 +70,8 @@ import javax.jdo.JDODataStoreException;
 import com.google.common.collect.ImmutableList;
 
 import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgramBuilder;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.RelVisitor;
 import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rex.RexBuilder;
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang3.ObjectUtils;
 import org.apache.commons.lang3.tuple.Pair;
@@ -199,7 +193,6 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -216,7 +209,6 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.HiveVersionInfo;
-import org.apache.hive.common.util.TxnIdUtils;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1623,7 +1615,7 @@ public class Hive {
   public List<String> getTablesByType(String dbName, String pattern, TableType type)
       throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_TABLE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_TABLE);
 
     if (dbName == null) {
       dbName = SessionState.get().getCurrentDatabase();
@@ -1648,7 +1640,7 @@ public class Hive {
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_TABLE, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_TABLE, "HS2-cache");
     }
   }
 
@@ -2038,7 +2030,7 @@ public class Hive {
    */
   public Database getDatabase(String dbName) throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE);
     try {
       return getMSC().getDatabase(dbName);
     } catch (NoSuchObjectException e) {
@@ -2046,7 +2038,7 @@ public class Hive {
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE, "HS2-cache");
     }
   }
 
@@ -2059,7 +2051,7 @@ public class Hive {
    */
   public Database getDatabase(String catName, String dbName) throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE_2);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE_2);
     try {
       return getMSC().getDatabase(catName, dbName);
     } catch (NoSuchObjectException e) {
@@ -2067,7 +2059,7 @@ public class Hive {
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE_2, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE_2, "HS2-cache");
     }
   }
 
@@ -2132,7 +2124,7 @@ public class Hive {
                                  int stmtId, boolean isInsertOverwrite, boolean isDirectInsert) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION);
+    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION);
 
     // Get the partition object if it already exists
     Partition oldPart = getPartition(tbl, partSpec, false);
@@ -2169,7 +2161,7 @@ public class Hive {
       }
     }
 
-    perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION);
+    perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION);
 
     return newTPart;
   }
@@ -2257,7 +2249,7 @@ public class Hive {
           ? genPartPathFromTable(tbl, partSpec, tblDataLocationPath) : oldPartPath;
       }
 
-      perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
 
       // Note: the stats for ACID tables do not have any coordination with either Hive ACID logic
       //       like txn commits, time outs, etc.; nor the lower level sync in metastore pertaining
@@ -2313,7 +2305,7 @@ public class Hive {
               tbl.getNumBuckets() > 0, isFullAcidTable, isManaged, false);
         }
       }
-      perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
       Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
       alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
       validatePartition(newTPart);
@@ -2771,7 +2763,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       boolean isInsertOverwrite, boolean isDirectInsert) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
+    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
 
     // Get all valid partition paths and existing partitions for them (if any)
     final Table tbl = getTable(tableName);
@@ -2967,7 +2959,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       }
       LOG.info("Loaded " + result.size() + "partitionsToAdd");
 
-      perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
+      perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);
 
       return result;
     } catch (TException te) {
@@ -3004,7 +2996,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       Long writeId, int stmtId, boolean isInsertOverwrite, boolean isDirectInsert) throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_TABLE);
+    perfLogger.perfLogBegin("MoveTask", PerfLogger.LOAD_TABLE);
 
     List<Path> newFiles = null;
     Table tbl = getTable(tableName);
@@ -3052,7 +3044,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       Utilities.FILE_OP_LOGGER.debug("moving " + loadPath + " to " + tblPath
           + " (replace = " + loadFileType + ")");
 
-      perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
 
       boolean isManaged = tbl.getTableType() == TableType.MANAGED_TABLE;
 
@@ -3073,7 +3065,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           throw new HiveException("addFiles: filesystem error in check phase", e);
         }
       }
-      perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
+      perfLogger.perfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
     }
     if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
       LOG.debug("setting table statistics false for " + tbl.getDbName() + "." + tbl.getTableName());
@@ -3115,7 +3107,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       fireInsertEvent(tbl, null, (loadFileType == LoadFileType.REPLACE_ALL), newFiles);
     }
 
-    perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_TABLE);
+    perfLogger.perfLogEnd("MoveTask", PerfLogger.LOAD_TABLE);
   }
 
   /**
@@ -3592,7 +3584,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    */
   public List<Partition> getPartitions(Table tbl) throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS);
 
     try {
       if (tbl.isPartitioned()) {
@@ -3626,7 +3618,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         return Collections.singletonList(new Partition(tbl));
       }
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS, "HS2-cache");
     }
   }
 
@@ -3669,7 +3661,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       short limit)
   throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2);
     try {
       if (!tbl.isPartitioned()) {
         throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
@@ -3692,7 +3684,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
       return qlPartitions;
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2, "HS2-cache");
     }
   }
 
@@ -3909,7 +3901,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public boolean getPartitionsByExpr(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf,
       List<Partition> partitions) throws HiveException, TException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR);
     try {
       Preconditions.checkNotNull(partitions);
       byte[] exprBytes = SerializationUtilities.serializeExpressionToKryo(expr);
@@ -3932,7 +3924,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
       return hasUnknownParts;
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR, "HS2-cache");
     }
   }
 
@@ -5288,7 +5280,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       throws HiveException {
 
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS);
     List<ColumnStatisticsObj> retv = null;
     try {
       if (checkTransactional) {
@@ -5305,7 +5297,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       LOG.debug(StringUtils.stringifyException(e));
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_TABLE_COLUMN_STATS, "HS2-cache");
     }
   }
 
@@ -5332,7 +5324,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public AggrStats getAggrColStatsFor(String dbName, String tblName,
      List<String> colNames, List<String> partName, boolean checkTransactional) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS);
     String writeIdList = null;
     try {
       if (checkTransactional) {
@@ -5348,7 +5340,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       LOG.debug(StringUtils.stringifyException(e));
       return new AggrStats(new ArrayList<ColumnStatisticsObj>(),0);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_AGGR_COL_STATS, "HS2-cache");
     }
   }
 
@@ -5719,7 +5711,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   private PrimaryKeyInfo getPrimaryKeys(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PK);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PK);
     try {
       List<SQLPrimaryKey> primaryKeys = getMSC().getPrimaryKeys(new PrimaryKeysRequest(dbName, tblName));
       if (onlyReliable && primaryKeys != null && !primaryKeys.isEmpty()) {
@@ -5732,7 +5724,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PK, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PK, "HS2-cache");
     }
   }
 
@@ -5763,7 +5755,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   private ForeignKeyInfo getForeignKeys(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_FK);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_FK);
     try {
       List<SQLForeignKey> foreignKeys = getMSC().getForeignKeys(new ForeignKeysRequest(null, null, dbName, tblName));
       if (onlyReliable && foreignKeys != null && !foreignKeys.isEmpty()) {
@@ -5776,7 +5768,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_FK, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_FK, "HS2-cache");
     }
   }
 
@@ -5807,7 +5799,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   private UniqueConstraint getUniqueConstraints(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_UNIQ_CONSTRAINT);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_UNIQ_CONSTRAINT);
     try {
       List<SQLUniqueConstraint> uniqueConstraints = getMSC().getUniqueConstraints(
               new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
@@ -5821,7 +5813,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_UNIQ_CONSTRAINT, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_UNIQ_CONSTRAINT, "HS2-cache");
     }
   }
 
@@ -5923,7 +5915,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   private NotNullConstraint getNotNullConstraints(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_NOT_NULL_CONSTRAINT);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_NOT_NULL_CONSTRAINT);
     try {
       List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
               new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
@@ -5937,7 +5929,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     } catch (Exception e) {
       throw new HiveException(e);
     } finally {
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_NOT_NULL_CONSTRAINT, "HS2-cache");
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_NOT_NULL_CONSTRAINT, "HS2-cache");
     }
   }
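
A pattern worth noting in the Hive.java hunks above: the begin call precedes the metastore RPC, the end call sits in a finally block so the timer closes on the exception path too, and "HS2-cache" is passed as additionalInfo to tag calls that HiveServer2 can serve from its cache. Condensed into one sketch that assumes Hive.java's fields and imports; getSomethingFromMetastore is a hypothetical stand-in, not a real method:

    // Condensed form of the call sites above; the method body is illustrative.
    public Database getDatabaseSketch(String dbName) throws HiveException {
      PerfLogger perfLogger = SessionState.getPerfLogger();
      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE);
      try {
        return getSomethingFromMetastore(dbName);  // hypothetical metastore call
      } catch (Exception e) {
        throw new HiveException(e);
      } finally {
        // additionalInfo ("HS2-cache") is appended to the perf-log line
        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_DATABASE, "HS2-cache");
      }
    }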
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 48180b2..512c3b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -167,7 +167,7 @@ public final class HiveMaterializedViewsRegistry {
       ss.setIsHiveServerQuery(true); // All is served from HS2, we do not need e.g. Tez sessions
       SessionState.start(ss);
       PerfLogger perfLogger = SessionState.getPerfLogger();
-      perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.MATERIALIZED_VIEWS_REGISTRY_REFRESH);
+      perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.MATERIALIZED_VIEWS_REGISTRY_REFRESH);
       try {
         if (initialized.get()) {
           for (Table mvTable : db.getAllMaterializedViewObjectsForRewriting()) {
@@ -200,7 +200,7 @@ public final class HiveMaterializedViewsRegistry {
           LOG.error("Problem connecting to the metastore when initializing the view registry", e);
         }
       }
-      perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.MATERIALIZED_VIEWS_REGISTRY_REFRESH);
+      perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.MATERIALIZED_VIEWS_REGISTRY_REFRESH);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java
index 6c57797..7757d13 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Transform.java
@@ -42,15 +42,15 @@ public abstract class Transform {
   
   public void beginPerfLogging() {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
   }
 
   public void endPerfLogging() {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER);
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER);
   }
   public void endPerfLogging(String additionalInfo) {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, additionalInfo);
+       perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, additionalInfo);
   }  
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index 709b221..cb35ddd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -450,7 +450,7 @@ public class PartitionPruner extends Transform {
       boolean hasUnknownPartitions = false;
       PerfLogger perfLogger = SessionState.getPerfLogger();
       if (!doEvalClientSide) {
-        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+        perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
         try {
           hasUnknownPartitions = Hive.get().getPartitionsByExpr(
               tab, compactExpr, conf, partitions);
@@ -459,7 +459,7 @@ public class PartitionPruner extends Transform {
           LOG.warn("Metastore doesn't support getPartitionsByExpr", ime);
           doEvalClientSide = true;
         } finally {
-          perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+          perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
         }
       }
       if (doEvalClientSide) {
@@ -482,9 +482,9 @@ public class PartitionPruner extends Transform {
 
   private static Set<Partition> getAllPartitions(Table tab) throws HiveException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
     Set<Partition> result = Hive.get().getAllPartitionsOf(tab);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
     return result;
   }
 
@@ -500,7 +500,7 @@ public class PartitionPruner extends Transform {
   static private boolean pruneBySequentialScan(Table tab, List<Partition> partitions,
       ExprNodeGenericFuncDesc prunerExpr, HiveConf conf) throws HiveException, MetaException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRUNE_LISTING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.PRUNE_LISTING);
 
     List<String> partNames = Hive.get().getPartitionNames(
         tab.getDbName(), tab.getTableName(), (short) -1);
@@ -511,13 +511,13 @@ public class PartitionPruner extends Transform {
 
     boolean hasUnknownPartitions = prunePartitionNames(
         partCols, partColTypeInfos, prunerExpr, defaultPartitionName, partNames);
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRUNE_LISTING);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.PRUNE_LISTING);
 
-    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
     if (!partNames.isEmpty()) {
       partitions.addAll(Hive.get().getPartitionsByNames(tab, partNames));
     }
-    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
+    perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING);
     return hasUnknownPartitions;
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 3797971..a089340 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1858,7 +1858,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
 
       PerfLogger perfLogger = SessionState.getPerfLogger();
       // 1. Gen Calcite Plan
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       try {
         calciteGenPlan = genLogicalPlan(getQB(), true, null, null);
         // if it is to create view, we do not use table alias
@@ -1869,7 +1869,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         semanticException = e;
         throw new RuntimeException(e);
       }
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Plan generation");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Plan generation");
 
       // Create executor
       RexExecutor executorProvider = new HiveRexExecutorImpl();
@@ -2145,9 +2145,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
       }
 
       // Trigger program
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = executeProgram(basePlan, program.build(), mdProvider, executorProvider);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
           "Calcite: Prejoin ordering transformation");
 
       return basePlan;
@@ -2214,7 +2214,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         return calcitePreMVRewritingPlan;
       }
 
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
 
       // We need to expand IN/BETWEEN expressions when materialized view rewriting
       // is triggered since otherwise this may prevent some rewritings from happening
@@ -2271,7 +2271,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(mdProvider));
       }
 
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: View-based rewriting");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: View-based rewriting");
 
       List<Table> materializedViewsUsedOriginalPlan = getMaterializedViewsUsed(calcitePreMVRewritingPlan);
       List<Table> materializedViewsUsedAfterRewrite = getMaterializedViewsUsed(basePlan);
@@ -2376,7 +2376,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       generatePartialProgram(program, false, HepMatchOrder.BOTTOM_UP,
           new JoinToMultiJoinRule(HiveJoin.class), new LoptOptimizeJoinRule(HiveRelFactories.HIVE_BUILDER));
 
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       RelNode calciteOptimizedPlan;
       try {
         calciteOptimizedPlan = executeProgram(basePlan, program.build(), mdProvider, executorProvider);
@@ -2390,7 +2390,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
           throw e;
         }
       }
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Join Reordering");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Join Reordering");
 
       return calciteOptimizedPlan;
     }
@@ -2532,9 +2532,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
       }
 
       // Trigger program
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = executeProgram(basePlan, program.build(), mdProvider, executorProvider);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
           "Calcite: Postjoin ordering transformation");
 
       return basePlan;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index 7665d5e..52732ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -175,14 +175,14 @@ public class TezCompiler extends TaskCompiler {
     // Create the context for the walker
     OptimizeTezProcContext procCtx = new OptimizeTezProcContext(conf, pCtx, inputs, outputs);
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     runTopNKeyOptimization(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run top n key optimization");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run top n key optimization");
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // setup dynamic partition pruning where possible
     runDynamicPartitionPruning(procCtx, inputs, outputs);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup dynamic partition pruning");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup dynamic partition pruning");
 
     if(procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_MULTICOLUMN)) {
       SemiJoinReductionMerge sjmerge = new SemiJoinReductionMerge();
@@ -196,52 +196,52 @@ public class TezCompiler extends TaskCompiler {
       new 
ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext);
     }
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // setup stats in the operator plan
     runStatsAnnotation(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup stats in the operator plan");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup stats in the operator plan");
 
     // run Sorted dynamic partition optimization
     if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONING) &&
         HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") &&
         !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) {
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       new SortedDynPartitionOptimizer().transform(procCtx.parseContext);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Sorted dynamic partition optimization");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Sorted dynamic partition optimization");
     }
 
     if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) {
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       // Dynamic sort partition adds an extra RS therefore need to de-dup
       new ReduceSinkDeDuplication().transform(procCtx.parseContext);
       // there is an issue with dedup logic wherein SELECT is created with wrong columns
       // NonBlockingOpDeDupProc fixes that
       // (kind of hackish, the issue in de-dup should be fixed but it needs more investigation)
       new NonBlockingOpDeDupProc().transform(procCtx.parseContext);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Reduce Sink de-duplication");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Reduce Sink de-duplication");
     }
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     // run the optimizations that use stats for optimization
     runStatsDependentOptimizations(procCtx, inputs, outputs);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run the optimizations that use stats for optimization");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run the optimizations that use stats for optimization");
 
     // repopulate bucket versions; join conversion may have created some new reducesinks
     new BucketVersionPopulator().transform(pCtx);
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTJOINREDUCEDEDUPLICATION)) {
       new ReduceSinkJoinDeDuplication().transform(procCtx.parseContext);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run reduce sink after join algorithm selection");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run reduce sink after join algorithm selection");
 
     semijoinRemovalBasedTransformations(procCtx, inputs, outputs);
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if(procCtx.conf.getBoolVar(ConfVars.HIVE_SHARED_WORK_OPTIMIZATION)) {
       new SharedWorkOptimizer().transform(procCtx.parseContext);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Shared scans optimization");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Shared scans optimization");
 
     // need a new run of the constant folding because we might have created lots
     // of "and true and true" conditions.
@@ -251,10 +251,10 @@ public class TezCompiler extends TaskCompiler {
       new 
ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext);
     }
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     AuxOpTreeSignature.linkAuxSignatures(procCtx.parseContext);
     markOperatorsWithUnstableRuntimeStats(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "markOperatorsWithUnstableRuntimeStats");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "markOperatorsWithUnstableRuntimeStats");
 
     if (procCtx.conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
       bucketingVersionSanityCheck(procCtx);
@@ -504,58 +504,58 @@ public class TezCompiler extends TaskCompiler {
     final boolean extendedReductionEnabled = dynamicPartitionPruningEnabled &&
         procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED);
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if (dynamicPartitionPruningEnabled) {
       runRemoveDynamicPruningOptimization(procCtx, inputs, outputs);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run remove dynamic pruning by size");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run remove dynamic pruning by size");
 
     if (semiJoinReductionEnabled) {
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       markSemiJoinForDPP(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Mark certain semijoin edges important based ");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Mark certain semijoin edges important based ");
 
       // Remove any semi join edges from Union Op
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       removeSemiJoinEdgesForUnion(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER,
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER,
                             "Remove any semi join edge between Union and RS");
 
       // Remove any parallel edge between semijoin and mapjoin.
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       removeSemijoinsParallelToMapJoin(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove any parallel edge between semijoin and mapjoin");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove any parallel edge between semijoin and mapjoin");
 
       // Remove semijoin optimization if SMB join is created.
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       removeSemijoinOptimizationFromSMBJoins(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if needed");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if needed");
 
       // Remove bloomfilter if no stats generated
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       removeSemiJoinIfNoStats(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove bloom filter optimizations if needed");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove bloom filter optimizations if needed");
 
       // Removing semijoin optimization when it may not be beneficial
-      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
       removeSemijoinOptimizationByBenefit(procCtx);
-      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove Semijoins based on cost benefits");
+      perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove Semijoins based on cost benefits");
     }
 
     // after the stats phase we might have some cyclic dependencies that we need
     // to take care of.
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if (dynamicPartitionPruningEnabled) {
       runCycleAnalysisForPartitionPruning(procCtx, inputs, outputs);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run cycle analysis for partition pruning");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run cycle analysis for partition pruning");
 
     // remove redundant dpp and semijoins
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if (extendedReductionEnabled) {
       removeRedundantSemijoinAndDpp(procCtx);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove redundant semijoin reduction");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove redundant semijoin reduction");
   }
 
  private void runRemoveDynamicPruningOptimization(OptimizeTezProcContext procCtx,
@@ -613,7 +613,7 @@ public class TezCompiler extends TaskCompiler {
       throws SemanticException {
 
        PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     ParseContext tempParseContext = getParseContext(pCtx, rootTasks);
     GenTezUtils utils = new GenTezUtils();
     GenTezWork genTezWork = new GenTezWork(utils);
@@ -697,7 +697,7 @@ public class TezCompiler extends TaskCompiler {
       LOG.debug("Handling AppMasterEventOperator: " + event);
       GenTezUtils.processAppMasterEvent(procCtx, event);
     }
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "generateTaskTree");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "generateTaskTree");
   }
 
   @Override
@@ -759,7 +759,7 @@ public class TezCompiler extends TaskCompiler {
   protected void optimizeTaskPlan(List<Task<?>> rootTasks, ParseContext pCtx,
       Context ctx) throws SemanticException {
     PerfLogger perfLogger = SessionState.getPerfLogger();
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks,
        pCtx.getFetchTask());
 
@@ -821,7 +821,7 @@ public class TezCompiler extends TaskCompiler {
       new AnnotateRunTimeStatsOptimizer().resolve(physicalCtx);
     }
 
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "optimizeTaskPlan");
+    perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "optimizeTaskPlan");
     return;
   }
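
TezCompiler times each optimization stage with the same begin/end pair plus a human-readable stage label. A hypothetical helper, not part of this commit, that would factor out that boilerplate; note the commit keeps the straight-line calls, while the finally here would additionally close the timer if a stage threw:

    @FunctionalInterface
    interface Stage {  // hypothetical, so lambdas may throw the checked exception
      void run() throws SemanticException;
    }

    private void timedStage(PerfLogger perfLogger, String label, Stage stage) throws SemanticException {
      perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
      try {
        stage.run();
      } finally {
        perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, label);
      }
    }

    // Usage, e.g.: timedStage(perfLogger, "Run top n key optimization",
    //     () -> runTopNKeyOptimization(procCtx));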
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java
index 7e0c9d0..f56f1f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java
@@ -109,7 +109,7 @@ public class SparkCompiler extends TaskCompiler {
   @Override
   protected void optimizeOperatorPlan(ParseContext pCtx, Set<ReadEntity> inputs,
       Set<WriteEntity> outputs) throws SemanticException {
-    PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE);
+    PERF_LOGGER.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE);
 
     OptimizeSparkProcContext procCtx = new OptimizeSparkProcContext(conf, pCtx, inputs, outputs);
 
@@ -145,7 +145,7 @@ public class SparkCompiler extends TaskCompiler {
       new 
ConstantPropagate(ConstantPropagateProcCtx.ConstantPropagateOption.SHORTCUT).transform(pCtx);
     }
 
-    PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE);
+    PERF_LOGGER.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE);
   }
 
   private void runRemoveDynamicPruning(OptimizeSparkProcContext procCtx) throws SemanticException {
@@ -352,7 +352,7 @@ public class SparkCompiler extends TaskCompiler {
   protected void generateTaskTree(List<Task<?>> rootTasks, ParseContext pCtx,
       List<Task<MoveWork>> mvTask, Set<ReadEntity> inputs, Set<WriteEntity> outputs)
       throws SemanticException {
-    PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE);
+    PERF_LOGGER.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE);
 
     GenSparkUtils utils = GenSparkUtils.getUtils();
     utils.resetSequenceNumber();
@@ -421,7 +421,7 @@ public class SparkCompiler extends TaskCompiler {
       utils.processPartitionPruningSink(procCtx, (SparkPartitionPruningSinkOperator) prunerSink);
     }
 
-    PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE);
+    PERF_LOGGER.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE);
   }
 
   private void generateTaskTreeHelper(GenSparkProcContext procCtx, List<Node> topNodes)
@@ -564,7 +564,7 @@ public class SparkCompiler extends TaskCompiler {
   @Override
   protected void optimizeTaskPlan(List<Task<?>> rootTasks, ParseContext pCtx,
       Context ctx) throws SemanticException {
-    PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE);
+    PERF_LOGGER.perfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE);
     PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks,
        pCtx.getFetchTask());
 
@@ -622,7 +622,7 @@ public class SparkCompiler extends TaskCompiler {
       new AnnotateRunTimeStatsOptimizer().resolve(physicalCtx);
     }
 
-    PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE);
+    PERF_LOGGER.perfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_TASK_TREE);
     return;
   }
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
index 71de23b..4582837 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
@@ -209,8 +209,8 @@ public class TestHiveProtoLoggingHook {
   @Test
   public void testPostEventLog() throws Exception {
     context.setHookType(HookType.POST_EXEC_HOOK);
-    context.getPerfLogger().PerfLogBegin("test", "LogTest");
-    context.getPerfLogger().PerfLogEnd("test", "LogTest");
+    context.getPerfLogger().perfLogBegin("test", "LogTest");
+    context.getPerfLogger().perfLogEnd("test", "LogTest");
 
     EventLogger evtLogger = new EventLogger(conf, SystemClock.getInstance());
     evtLogger.handle(context);
diff --git a/standalone-metastore/metastore-common/spotbugs/spotbugs-exclude.xml b/standalone-metastore/metastore-common/spotbugs/spotbugs-exclude.xml
index 446c91a..e2c76d0 100644
--- a/standalone-metastore/metastore-common/spotbugs/spotbugs-exclude.xml
+++ b/standalone-metastore/metastore-common/spotbugs/spotbugs-exclude.xml
@@ -21,7 +21,4 @@
     <Match>
         <Class name="~org.apache.hadoop.hive.metastore.api.*" />
     </Match>
-    <Match>
-        <Class name="~org.apache.hadoop.hive.metastore.metrics.*" />
-    </Match>
 </FindBugsFilter>
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
index 86945f3..69a158a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
@@ -103,7 +103,7 @@ public class RetryingHMSHandler implements InvocationHandler {
     int threadId = baseHandler.getThreadId();
     boolean error = true;
     PerfLogger perfLogger = PerfLogger.getPerfLogger(false);
-    perfLogger.PerfLogBegin(CLASS_NAME, method.getName());
+    perfLogger.perfLogBegin(CLASS_NAME, method.getName());
     try {
       Result result = invokeInternal(proxy, method, args);
       retryCount = result.numRetries;
@@ -113,7 +113,7 @@ public class RetryingHMSHandler implements InvocationHandler {
       StringBuilder additionalInfo = new StringBuilder();
       additionalInfo.append("threadId=").append(threadId).append(" retryCount=").append(retryCount)
         .append(" error=").append(error);
-      perfLogger.PerfLogEnd(CLASS_NAME, method.getName(), additionalInfo.toString());
+      perfLogger.perfLogEnd(CLASS_NAME, method.getName(), additionalInfo.toString());
     }
   }
 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
index e198fbc..ee5aef4 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/JsonReporter.java
@@ -37,6 +37,7 @@ import org.slf4j.LoggerFactory;
 import java.io.BufferedWriter;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -156,7 +157,7 @@ public class JsonReporter extends ScheduledReporter {
     // Use try .. finally to cleanup temp file if something goes wrong
     try {
       // Write json to the temp file
-      try (BufferedWriter bw = new BufferedWriter(new FileWriter(tmpFile.toFile()))) {
+      try (BufferedWriter bw = Files.newBufferedWriter(tmpFile, StandardCharsets.UTF_8)) {
         bw.write(json);
       } catch (IOException e) {
         LOG.error("Unable to write to temp file {}" + tmpFile, e);
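
For context on the JsonReporter hunk: new FileWriter(File) encodes with the JVM's default charset, which spotbugs flags as DM_DEFAULT_ENCODING; Files.newBufferedWriter takes the charset explicitly. (As an aside, the unchanged log line above mixes an slf4j {} placeholder with string concatenation, so the placeholder is never substituted; the commit does not touch it.) A standalone sketch of the replacement call, with an illustrative path:

    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    class JsonWriteSketch {
      static void writeJson(String json) throws IOException {
        Path tmpFile = Paths.get("/tmp/metrics.json.tmp");  // illustrative path
        // Pin the encoding to UTF-8 instead of the platform default
        try (BufferedWriter bw = Files.newBufferedWriter(tmpFile, StandardCharsets.UTF_8)) {
          bw.write(json);
        }
      }
    }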
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
index 4dc7aa6..b47bb4a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/Metrics.java
@@ -75,7 +75,7 @@ public class Metrics {
     return self.registry;
   }
 
-  public static void shutdown() {
+  public static synchronized void shutdown() {
     if (self != null) {
       for (ScheduledReporter reporter : self.scheduledReporters) {
         reporter.stop();
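
Making Metrics.shutdown() synchronized matches the locking used to lazily initialize the static self instance, so shutdown can no longer interleave with a concurrent initialization (the spotbugs static-field synchronization warning). A minimal sketch of the singleton shape; everything beyond the synchronized shutdown is an assumption for illustration:

    class MetricsSketch {
      private static MetricsSketch self;  // lazily initialized singleton

      public static synchronized MetricsSketch initialize() {
        if (self == null) {
          self = new MetricsSketch();
        }
        return self;
      }

      // Same monitor as initialize(), so the two cannot interleave.
      public static synchronized void shutdown() {
        if (self != null) {
          // stop reporters and release resources here
          self = null;
        }
      }
    }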
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
index 5465187..aeede4e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java
@@ -88,7 +88,7 @@ public class PerfLogger {
    * @param callerName the logging object to be used.
    * @param method method or ID that identifies this perf log element.
    */
-  public void PerfLogBegin(String callerName, String method) {
+  public void perfLogBegin(String callerName, String method) {
     long startTime = System.currentTimeMillis();
     startTimes.put(method, Long.valueOf(startTime));
     if (LOG.isDebugEnabled()) {
@@ -102,8 +102,8 @@ public class PerfLogger {
    * @param method
   * @return long duration  the difference between now and startTime, or -1 if startTime is null
    */
-  public long PerfLogEnd(String callerName, String method) {
-    return PerfLogEnd(callerName, method, null);
+  public long perfLogEnd(String callerName, String method) {
+    return perfLogEnd(callerName, method, null);
   }
 
   /**
@@ -112,7 +112,7 @@ public class PerfLogger {
    * @param method
   * @return long duration  the difference between now and startTime, or -1 if startTime is null
    */
-  public long PerfLogEnd(String callerName, String method, String additionalInfo) {
+  public long perfLogEnd(String callerName, String method, String additionalInfo) {
     Long startTime = startTimes.get(method);
     long endTime = System.currentTimeMillis();
     endTimes.put(method, Long.valueOf(endTime));
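
The two-argument perfLogEnd delegating to the three-argument overload keeps the rename to a single real implementation. The hunk ends mid-method; a plausible reconstruction of the remainder, based only on the javadoc contract above (duration is -1 when no begin was recorded):

    // Sketch only: the tail of perfLogEnd is reconstructed, not quoted from the patch.
    public long perfLogEnd(String callerName, String method, String additionalInfo) {
      Long startTime = startTimes.get(method);   // may be null if begin was skipped
      long endTime = System.currentTimeMillis();
      endTimes.put(method, Long.valueOf(endTime));
      long duration = startTime == null ? -1 : endTime - startTime.longValue();
      // ... log callerName, method, additionalInfo and the duration ...
      return duration;
    }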
diff --git a/standalone-metastore/spotbugs/spotbugs-exclude.xml b/standalone-metastore/spotbugs/spotbugs-exclude.xml
index b716039..da4b99b 100644
--- a/standalone-metastore/spotbugs/spotbugs-exclude.xml
+++ b/standalone-metastore/spotbugs/spotbugs-exclude.xml
@@ -27,7 +27,4 @@
             <Class name="~org.apache.hadoop.hive.metastore.Metastore.*" />
         </Or>
     </Match>
-    <Match>
-        <Class name="~org.apache.hadoop.hive.metastore.metrics.*" />
-    </Match>
 </FindBugsFilter>
