http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index a1dd2d0..7ff0453 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -625,7 +625,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
     } finally {
       rollWriterLock.unlock();
     }
-    if (lowReplication || writer != null && writer.getLength() > logrollsize) {
+    if (lowReplication || (writer != null && writer.getLength() > logrollsize)) {
       requestLogRoll(lowReplication);
     }
   }
@@ -866,7 +866,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
     private final SyncFuture[] syncFutures;
    // Had 'interesting' issues when this was non-volatile. On occasion, we'd not pass all
     // syncFutures to the next sync'ing thread.
-    private volatile int syncFuturesCount = 0;
+    private AtomicInteger syncFuturesCount = new AtomicInteger();
     private volatile SafePointZigZagLatch zigzagLatch;
     /**
     * Set if we get an exception appending or syncing so that all subsequence appends and syncs on
@@ -894,10 +894,10 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 
    private void cleanupOutstandingSyncsOnException(final long sequence, final Exception e) {
       // There could be handler-count syncFutures outstanding.
-      for (int i = 0; i < this.syncFuturesCount; i++) {
+      for (int i = 0; i < this.syncFuturesCount.get(); i++) {
         this.syncFutures[i].done(sequence, e);
       }
-      this.syncFuturesCount = 0;
+      this.syncFuturesCount.set(0);
     }
 
     /**
@@ -905,7 +905,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
      */
     private boolean isOutstandingSyncs() {
       // Look at SyncFutures in the EventHandler
-      for (int i = 0; i < this.syncFuturesCount; i++) {
+      for (int i = 0; i < this.syncFuturesCount.get(); i++) {
         if (!this.syncFutures[i].isDone()) {
           return true;
         }
@@ -938,9 +938,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 
       try {
         if (truck.type() == RingBufferTruck.Type.SYNC) {
-          this.syncFutures[this.syncFuturesCount++] = truck.unloadSync();
+          this.syncFutures[this.syncFuturesCount.getAndIncrement()] = truck.unloadSync();
          // Force flush of syncs if we are carrying a full complement of syncFutures.
-          if (this.syncFuturesCount == this.syncFutures.length) {
+          if (this.syncFuturesCount.get() == this.syncFutures.length) {
             endOfBatch = true;
           }
         } else if (truck.type() == RingBufferTruck.Type.APPEND) {
@@ -979,7 +979,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
         if (this.exception == null) {
          // If not a batch, return to consume more events from the ring buffer before proceeding;
          // we want to get up a batch of syncs and appends before we go do a filesystem sync.
-          if (!endOfBatch || this.syncFuturesCount <= 0) {
+          if (!endOfBatch || this.syncFuturesCount.get() <= 0) {
             return;
           }
          // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows:
@@ -997,7 +997,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
            // Below expects that the offer 'transfers' responsibility for the outstanding syncs to
             // the syncRunner. We should never get an exception in here.
            this.syncRunners[this.syncRunnerIndex].offer(sequence, this.syncFutures,
-              this.syncFuturesCount);
+              this.syncFuturesCount.get());
           } catch (Exception e) {
             // Should NEVER get here.
             requestLogRoll();
@@ -1010,7 +1010,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
              ? this.exception : new DamagedWALException("On sync", this.exception));
         }
         attainSafePoint(sequence);
-        this.syncFuturesCount = 0;
+        this.syncFuturesCount.set(0);
       } catch (Throwable t) {
        LOG.error("UNEXPECTED!!! syncFutures.length=" + this.syncFutures.length, t);
       }

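The syncFuturesCount change in the hunks above replaces a volatile int with an AtomicInteger. Volatile guarantees visibility, but a compound update like syncFuturesCount++ is a separate read, add, and write, so concurrent increments can be lost; AtomicInteger.getAndIncrement() performs the whole update atomically. A minimal standalone sketch of the difference (class and field names here are illustrative, not from FSHLog):

    import java.util.concurrent.atomic.AtomicInteger;

    public class LostUpdateDemo {
      static volatile int volatileCount = 0;
      static final AtomicInteger atomicCount = new AtomicInteger();

      public static void main(String[] args) throws InterruptedException {
        Thread[] threads = new Thread[8];
        for (int i = 0; i < threads.length; i++) {
          threads[i] = new Thread(() -> {
            for (int n = 0; n < 100_000; n++) {
              volatileCount++;               // read-modify-write: updates can be lost
              atomicCount.getAndIncrement(); // atomic: never loses an update
            }
          });
          threads[i].start();
        }
        for (Thread t : threads) {
          t.join();
        }
        // atomicCount prints exactly 800000; volatileCount is usually less.
        System.out.println("volatile=" + volatileCount + " atomic=" + atomicCount.get());
      }
    }
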
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 6efd7bc..1ffe7f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -85,6 +85,7 @@ class FSWALEntry extends Entry {
     }
   }
 
+  @Override
   public String toString() {
     return "sequence=" + this.txid + ", " + super.toString();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index ebb6079..5d8d8c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -102,10 +102,12 @@ public class ProtobufLogReader extends ReaderBase {
   public long trailerSize() {
     if (trailerPresent) {
       // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer
-      final long calculatedSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize();
+      final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT
+          + trailer.getSerializedSize();
       final long expectedSize = fileLength - walEditsStopOffset;
       if (expectedSize != calculatedSize) {
-        LOG.warn("After parsing the trailer, we expect the total footer to be "+ expectedSize +" bytes, but we calculate it as being " + calculatedSize);
+        LOG.warn("After parsing the trailer, we expect the total footer to be {} bytes, but we "
+            + "calculate it as being {}", expectedSize, calculatedSize);
       }
       return expectedSize;
     } else {

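The LOG.warn rewrite above switches to SLF4J's parameterized style. Besides being easier to read, the {} placeholders defer message formatting until the logger has confirmed the level is enabled, so a disabled WARN costs no string concatenation. A small sketch of the pattern (the logger and values are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLoggingDemo {
      private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLoggingDemo.class);

      public static void main(String[] args) {
        long expectedSize = 128;
        long calculatedSize = 120;
        // Concatenation builds the message String even if WARN is disabled:
        LOG.warn("expected " + expectedSize + " but calculated " + calculatedSize);
        // Placeholders are substituted only when WARN is actually logged:
        LOG.warn("expected {} but calculated {}", expectedSize, calculatedSize);
      }
    }
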
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java
index 7b91bee..56576a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java
@@ -84,7 +84,7 @@ public abstract class BaseReplicationEndpoint extends AbstractService
         for (String filterName : filterNames) {
           try {
             Class<?> clazz = Class.forName(filterName);
-            filters.add((WALEntryFilter) clazz.newInstance());
+            filters.add((WALEntryFilter) clazz.getDeclaredConstructor().newInstance());
           } catch (Exception e) {
             LOG.error("Unable to create WALEntryFilter " + filterName, e);
           }

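This is the first of several hunks in this commit that replace Class.newInstance() with getDeclaredConstructor().newInstance(). Class.newInstance() is deprecated as of Java 9 because it rethrows any checked exception the no-arg constructor throws without declaring it; the constructor-based form wraps such exceptions in InvocationTargetException, keeping them checkable. A sketch under that assumption (class names illustrative):

    import java.lang.reflect.InvocationTargetException;

    public class ReflectiveInstantiationDemo {
      public static class Throwing {
        public Throwing() throws Exception {
          throw new Exception("checked exception from the constructor");
        }
      }

      public static void main(String[] args) {
        try {
          // Class.newInstance() would rethrow the constructor's checked exception
          // directly, bypassing compile-time exception checking.
          Throwing t = Throwing.class.getDeclaredConstructor().newInstance();
        } catch (InvocationTargetException e) {
          // With the constructor-based form it arrives wrapped instead:
          System.out.println("constructor threw: " + e.getCause().getMessage());
        } catch (ReflectiveOperationException e) {
          System.out.println("reflection failed: " + e);
        }
      }
    }
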
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java
index b28c58f..8a4d331 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java
@@ -35,7 +35,7 @@ public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurati
       LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class);
 
  // Map containing all the source clusters configurations against their replication cluster id
-  private Map<String, Configuration> sourceClustersConfs = new HashMap<>();
+  private final Map<String, Configuration> sourceClustersConfs = new HashMap<>();
   private static final String XML = ".xml";
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index d1a3266..2fa5a9b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -105,6 +105,7 @@ public class Replication implements
   public Replication() {
   }
 
+  @Override
  public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir,
       WALFileLengthProvider walFileLengthProvider) throws IOException {
     this.server = server;
@@ -164,6 +165,7 @@ public class Replication implements
    /*
     * Returns an object to listen to new wal changes
     **/
+  @Override
   public WALActionsListener getWALActionsListener() {
     return this;
   }
@@ -176,6 +178,7 @@ public class Replication implements
   /**
    * Stops replication service.
    */
+  @Override
   public void stopReplicationService() {
     join();
   }
@@ -204,6 +207,7 @@ public class Replication implements
   * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory
    * @throws IOException
    */
+  @Override
   public void replicateLogEntries(List<WALEntry> entries, CellScanner cells,
       String replicationClusterId, String sourceBaseNamespaceDirPath,
       String sourceHFileArchiveDirPath) throws IOException {
@@ -216,6 +220,7 @@ public class Replication implements
    * it starts
    * @throws IOException
    */
+  @Override
   public void startReplicationService() throws IOException {
     this.replicationManager.init();
     this.replicationSink = new ReplicationSink(this.conf, this.server);

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 57e185a..902971e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -110,7 +110,7 @@ public class ReplicationSink {
     try {
       @SuppressWarnings("rawtypes")
       Class c = Class.forName(className);
-      this.provider = (SourceFSConfigurationProvider) c.newInstance();
+      this.provider = (SourceFSConfigurationProvider) c.getDeclaredConstructor().newInstance();
     } catch (Exception e) {
      throw new IllegalArgumentException("Configured source fs configuration provider class "
           + className + " throws error.", e);
@@ -123,7 +123,7 @@ public class ReplicationSink {
     WALEntrySinkFilter filter = null;
     try {
       filter = walEntryFilterClass == null? null:
-          (WALEntrySinkFilter)walEntryFilterClass.newInstance();
+          (WALEntrySinkFilter)walEntryFilterClass.getDeclaredConstructor().newInstance();
     } catch (Exception e) {
       LOG.warn("Failed to instantiate " + walEntryFilterClass);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 923d893..09b6cc1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -63,7 +63,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
-
 /**
  * Class that handles the source of a replication stream.
  * Currently does not handle more than 1 slave
@@ -520,7 +519,9 @@ public class ReplicationSource implements ReplicationSourceInterface {
   public Path getCurrentPath() {
     // only for testing
     for (ReplicationSourceShipper worker : workerThreads.values()) {
-      if (worker.getCurrentPath() != null) return worker.getCurrentPath();
+      if (worker.getCurrentPath() != null) {
+        return worker.getCurrentPath();
+      }
     }
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index e087127..ab86d7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -137,7 +137,7 @@ public class ReplicationSourceManager implements ReplicationListener {
   private final Configuration conf;
   private final FileSystem fs;
   // The paths to the latest log of each wal group, for new coming peers
-  private Set<Path> latestPaths;
+  private final Set<Path> latestPaths;
   // Path to the wals directories
   private final Path logDir;
   // Path to the wal archive

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index 90a421d..c12dcb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -355,7 +355,8 @@ public class ReplicationSourceWALReader extends Thread {
           List<StoreDescriptor> stores = bld.getStoresList();
           int totalStores = stores.size();
           for (int j = 0; j < totalStores; j++) {
-            totalStoreFilesSize += stores.get(j).getStoreFileSizeBytes();
+            totalStoreFilesSize =
+                (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes());
           }
         } catch (IOException e) {
           LOG.error("Failed to deserialize bulk load entry from wal edit. "

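The cast in this hunk keeps totalStoreFilesSize an int even though getStoreFileSizeBytes() appears to return a long (the cast would otherwise be unnecessary), so totals beyond Integer.MAX_VALUE (about 2.1 GB) still wrap; a long accumulator avoids that. A sketch with illustrative sizes:

    public class SizeAccumulationDemo {
      public static void main(String[] args) {
        long[] storeFileSizes = { 1_500_000_000L, 1_500_000_000L }; // two 1.5 GB files
        int intTotal = 0;
        long longTotal = 0L;
        for (long size : storeFileSizes) {
          intTotal = (int) (intTotal + size); // wraps once the sum passes 2^31 - 1
          longTotal += size;
        }
        System.out.println("int total:  " + intTotal);  // negative: overflowed
        System.out.println("long total: " + longTotal); // 3000000000
      }
    }
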
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
index e0427ce..0797561 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
@@ -154,6 +154,7 @@ class AccessControlFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     // no implementation, server-side use only
     throw new UnsupportedOperationException(

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 60842be..6acc133 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2187,8 +2187,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
          // Also using acl as table name to be inline  with the results of global admin and will
          // help in avoiding any leakage of information about being superusers.
           for (String user: Superusers.getSuperUsers()) {
-            perms.add(new UserPermission(user.getBytes(), AccessControlLists.ACL_TABLE_NAME, null,
-                Action.values()));
+            perms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
+                null, Action.values()));
           }
         }
         response = AccessControlUtil.buildGetUserPermissionsResponse(perms);

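The UserPermission hunk swaps String.getBytes(), which encodes with the JVM's platform default charset, for Bytes.toBytes(), which HBase defines to always encode UTF-8. A standalone sketch of why the default charset is a hazard (user name and encodings are illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class CharsetDemo {
      public static void main(String[] args) {
        String user = "süperuser"; // any non-ASCII name
        byte[] platform = user.getBytes();                   // depends on -Dfile.encoding
        byte[] utf8 = user.getBytes(StandardCharsets.UTF_8); // what Bytes.toBytes produces
        // On a JVM running with a non-UTF-8 default (e.g. ISO-8859-1) these differ,
        // silently changing the bytes stored for the permission.
        System.out.println("platform: " + Arrays.toString(platform));
        System.out.println("utf-8:    " + Arrays.toString(utf8));
      }
    }
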
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
index 1949b98..cecca41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java
@@ -207,6 +207,7 @@ public class AuthResult {
     return sb.toString();
   }
 
+  @Override
   public String toString() {
     return "AuthResult" + toContextString();
   }
@@ -279,6 +280,7 @@ public class AuthResult {
       return this;
     }
 
+    @Override
     public String toString() {
       String familiesString = toFamiliesString(families, family, qualifier);
       String[] params = new String[] {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
index de8ea5d..59b91a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
@@ -330,6 +330,7 @@ public class AuthenticationTokenSecretManager
       interrupt();
     }
 
+    @Override
     public void run() {
       zkLeader.start();
       zkLeader.waitToBecomeLeader();

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
index 0bc74b1..c2dd046 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.security.visibility;
 
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Stack;
@@ -103,7 +104,8 @@ public class ExpressionParser {
             }
             index++;
           } while (index < endPos && !isEndOfLabel(exp[index]));
-          leafExp = new String(exp, labelOffset, index - labelOffset).trim();
+          leafExp =
+              new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim();
           if (leafExp.isEmpty()) {
            throw new ParseException("Error parsing expression " + expS + " at column : " + index);
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index 1ba6029..6e00f40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -36,6 +36,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
@@ -753,8 +754,9 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso
          logResult(true, "addLabels", "Adding labels allowed", null, labels, null);
           int i = 0;
           for (OperationStatus status : opStatus) {
-            while (response.getResult(i) != successResult)
+            while (!Objects.equals(response.getResult(i), successResult)) {
               i++;
+            }
             if (status.getOperationStatusCode() != SUCCESS) {
              RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder();
              failureResultBuilder.setException(buildException(new DoNotRetryIOException(

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
index f6ed72f..f3e4853 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
@@ -149,7 +149,7 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack
     List<Tag> putVisTags = new ArrayList<>();
    Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags);
     return putVisTags.isEmpty() == delInfo.tags.isEmpty() && (
-        putVisTags.isEmpty() && delInfo.tags.isEmpty() || VisibilityLabelServiceManager
+        (putVisTags.isEmpty() && delInfo.tags.isEmpty()) || VisibilityLabelServiceManager
             .getInstance().getVisibilityLabelService()
            .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
index 78b5037..fd479b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java
@@ -58,6 +58,7 @@ public class LeafExpressionNode implements ExpressionNode {
     return true;
   }
 
+  @Override
   public LeafExpressionNode deepClone() {
     LeafExpressionNode clone = new LeafExpressionNode(this.identifier);
     return clone;

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
index 77b34e9..83610fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java
@@ -91,6 +91,7 @@ public class NonLeafExpressionNode implements ExpressionNode {
     return this.op == Operator.NOT;
   }
 
+  @Override
   public NonLeafExpressionNode deepClone() {
     NonLeafExpressionNode clone = new NonLeafExpressionNode(this.op);
     for (ExpressionNode exp : this.childExps) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
index 0bb8137..2281453 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java
@@ -23,12 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience;
 public enum Operator {
   AND('&'), OR('|'), NOT('!');
 
-  private char rep;
+  private final char rep;
 
   private Operator(char rep) {
     this.rep = rep;
   }
 
+  @Override
   public String toString() {
     return String.valueOf(this.rep);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
index 7d7e526..97a74ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
@@ -300,7 +300,8 @@ public final class SnapshotInfo extends AbstractHBaseTool {
       boolean inArchive = false;
       long size = -1;
       try {
-        if ((inArchive = fs.exists(link.getArchivePath()))) {
+        if (fs.exists(link.getArchivePath())) {
+          inArchive = true;
           size = fs.getFileStatus(link.getArchivePath()).getLen();
           hfilesArchiveSize.addAndGet(size);
           hfilesArchiveCount.incrementAndGet();
@@ -311,7 +312,8 @@ public final class SnapshotInfo extends AbstractHBaseTool {
              !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) {
             nonSharedHfilesArchiveSize.addAndGet(size);
           }
-        } else if (inArchive = fs.exists(link.getMobPath())) {
+        } else if (fs.exists(link.getMobPath())) {
+          inArchive = true;
           size = fs.getFileStatus(link.getMobPath()).getLen();
           hfilesMobSize.addAndGet(size);
           hfilesMobCount.incrementAndGet();

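The SnapshotInfo hunks unwind assignments embedded in if conditions. An expression like `if ((inArchive = fs.exists(...)))` is legal because the assignment is boolean-typed, which is exactly what makes it easy to misread as a comparison; splitting assignment from test makes the control flow explicit. A sketch of the two styles (the helper is hypothetical):

    public class AssignmentInConditionDemo {
      static boolean exists(String path) {
        return path.startsWith("/archive");
      }

      public static void main(String[] args) {
        // Compact but easy to misread as a comparison:
        boolean inArchive;
        if ((inArchive = exists("/archive/hfile1"))) {
          System.out.println("found (compact form), inArchive=" + inArchive);
        }
        // Equivalent, with the assignment made explicit:
        boolean inArchive2 = false;
        if (exists("/archive/hfile1")) {
          inArchive2 = true;
          System.out.println("found (explicit form), inArchive2=" + inArchive2);
        }
      }
    }
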
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index a5468ee..1d7f4f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -75,23 +75,28 @@ public final class SnapshotManifestV1 {
       this.fs = fs;
     }
 
+    @Override
    public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException {
      HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
         fs, snapshotDir, regionInfo);
       return snapshotRegionFs;
     }
 
+    @Override
     public void regionClose(final HRegionFileSystem region) {
     }
 
+    @Override
    public Path familyOpen(final HRegionFileSystem snapshotRegionFs, final byte[] familyName) {
      Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName));
       return familyDir;
     }
 
+    @Override
    public void familyClose(final HRegionFileSystem region, final Path family) {
     }
 
+    @Override
     public void storeFile(final HRegionFileSystem region, final Path familyDir,
         final StoreFileInfo storeFile) throws IOException {
       Path referenceFile = new Path(familyDir, storeFile.getPath().getName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
index 4d35f0b..4e60d67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
@@ -77,12 +77,14 @@ public final class SnapshotManifestV2 {
       this.fs = fs;
     }
 
+    @Override
    public SnapshotRegionManifest.Builder regionOpen(final RegionInfo regionInfo) {
      SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
       manifest.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));
       return manifest;
     }
 
+    @Override
    public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException {
      // we should ensure the snapshot dir exist, maybe it has been deleted by master
       // see HBASE-16464
@@ -99,6 +101,7 @@ public final class SnapshotManifestV2 {
       }
     }
 
+    @Override
     public SnapshotRegionManifest.FamilyFiles.Builder familyOpen(
         final SnapshotRegionManifest.Builder region, final byte[] familyName) {
       SnapshotRegionManifest.FamilyFiles.Builder family =
@@ -107,11 +110,13 @@ public final class SnapshotManifestV2 {
       return family;
     }
 
+    @Override
     public void familyClose(final SnapshotRegionManifest.Builder region,
         final SnapshotRegionManifest.FamilyFiles.Builder family) {
       region.addFamilyFiles(family.build());
     }
 
+    @Override
     public void storeFile(final SnapshotRegionManifest.Builder region,
        final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile)
         throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
index c14c944..3cff047 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
@@ -971,7 +971,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
         continue;
       }
       Path familyDir = familyStat.getPath();
-      byte[] familyName = familyDir.getName().getBytes();
+      byte[] familyName = Bytes.toBytes(familyDir.getName());
       // Skip invalid family
       try {
         ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
index 9950570..efad97e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java
@@ -155,6 +155,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     this.queue = new PriorityQueue<>(capacity, comparator);
   }
 
+  @Override
   public boolean offer(E e) {
     if (e == null) throw new NullPointerException();
 
@@ -171,6 +172,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return false;
   }
 
+  @Override
   public void put(E e) throws InterruptedException {
     if (e == null) throw new NullPointerException();
 
@@ -186,6 +188,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     }
   }
 
+  @Override
   public boolean offer(E e, long timeout, TimeUnit unit)
       throws InterruptedException {
     if (e == null) throw new NullPointerException();
@@ -206,6 +209,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return true;
   }
 
+  @Override
   public E take() throws InterruptedException {
     E result = null;
     lock.lockInterruptibly();
@@ -221,6 +225,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return result;
   }
 
+  @Override
   public E poll() {
     E result = null;
     lock.lock();
@@ -235,6 +240,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return result;
   }
 
+  @Override
   public E poll(long timeout, TimeUnit unit)
       throws InterruptedException {
     long nanos = unit.toNanos(timeout);
@@ -254,6 +260,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return result;
   }
 
+  @Override
   public E peek() {
     lock.lock();
     try {
@@ -263,6 +270,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     }
   }
 
+  @Override
   public int size() {
     lock.lock();
     try {
@@ -272,6 +280,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     }
   }
 
+  @Override
   public Iterator<E> iterator() {
     throw new UnsupportedOperationException();
   }
@@ -280,6 +289,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     return queue.comparator();
   }
 
+  @Override
   public int remainingCapacity() {
     lock.lock();
     try {
@@ -289,10 +299,12 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     }
   }
 
+  @Override
   public boolean remove(Object o) {
     throw new UnsupportedOperationException();
   }
 
+  @Override
   public boolean contains(Object o) {
     lock.lock();
     try {
@@ -302,10 +314,12 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements
     }
   }
 
+  @Override
   public int drainTo(Collection<? super E> c) {
     return drainTo(c, Integer.MAX_VALUE);
   }
 
+  @Override
   public int drainTo(Collection<? super E> c, int maxElements) {
     if (c == null)
         throw new NullPointerException();

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java
index 4207f39..2cf3bb9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 public class FSMapRUtils extends FSUtils {
   private static final Logger LOG = LoggerFactory.getLogger(FSMapRUtils.class);
 
+  @Override
   public void recoverFileLease(final FileSystem fs, final Path p,
       Configuration conf, CancelableProgressable reporter) throws IOException {
     LOG.info("Recovering file " + p.toString() +

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
index f258e6c..04a3384 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
@@ -52,7 +52,7 @@ class FSRegionScanner implements Runnable {
   /**
    * Maps each region to the RS with highest locality for that region.
    */
-  private Map<String,String> regionToBestLocalityRSMapping;
+  private final Map<String,String> regionToBestLocalityRSMapping;
 
   /**
    * Maps region encoded names to maps of hostnames to fractional locality of

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index c76cd90..c3f3bd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -479,7 +479,7 @@ public class FSTableDescriptors implements TableDescriptors {
       // Clean away old versions
       for (FileStatus file : status) {
         Path path = file.getPath();
-        if (file != mostCurrent) {
+        if (!file.equals(mostCurrent)) {
           if (!fs.delete(file.getPath(), false)) {
             LOG.warn("Failed cleanup of " + path);
           } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 062e516..b4a22e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -3801,7 +3801,7 @@ public class HBaseFsck extends Configured implements Closeable {
     @Override
     public int hashCode() {
       int hash = Arrays.hashCode(getRegionName());
-      hash ^= getRegionId();
+      hash = (int) (hash ^ getRegionId());
       hash ^= Arrays.hashCode(getStartKey());
       hash ^= Arrays.hashCode(getEndKey());
       hash ^= Boolean.valueOf(isOffline()).hashCode();
@@ -3809,7 +3809,7 @@ public class HBaseFsck extends Configured implements Closeable {
       if (regionServer != null) {
         hash ^= regionServer.hashCode();
       }
-      hash ^= modTime;
+      hash = (int) (hash ^ modTime);
       return hash;
     }
   }
@@ -4055,7 +4055,7 @@ public class HBaseFsck extends Configured implements Closeable {
         return -1;
       }
       // both l.hdfsEntry and r.hdfsEntry must not be null.
-      return (int) (l.hdfsEntry.hri.getRegionId()- r.hdfsEntry.hri.getRegionId());
+      return Long.compare(l.hdfsEntry.hri.getRegionId(), r.hdfsEntry.hri.getRegionId());
     }
   };
 
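The comparator fix just above replaces a truncated subtraction with Long.compare. Difference-based comparators overflow: when the true difference does not fit in an int, the sign flips and the ordering becomes inconsistent. A minimal sketch:

    public class ComparatorOverflowDemo {
      public static void main(String[] args) {
        long small = -2_000_000_000L;
        long large = 2_000_000_000L;
        // True difference is -4e9, which wraps positive when cast to int:
        int broken = (int) (small - large);
        int correct = Long.compare(small, large);
        System.out.println("broken=" + broken + "   (wrongly claims small > large)");
        System.out.println("correct=" + correct + " (small < large)");
      }
    }
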

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index e06805c..eba9acd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -52,6 +52,7 @@ public class IdLock {
       this.id = id;
     }
 
+    @Override
     public String toString() {
       return "id=" + id + ", numWaiter=" + numWaiters + ", isLocked="
           + locked;

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 7b9cbb6..75b8ccd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -685,7 +685,7 @@ public class RegionSplitter {
       }
     }
     try {
-      return splitClass.asSubclass(SplitAlgorithm.class).newInstance();
+      return splitClass.asSubclass(SplitAlgorithm.class).getDeclaredConstructor().newInstance();
     } catch (Exception e) {
       throw new IOException("Problem loading split algorithm: ", e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
index 2819b82..f1c9ad3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java
@@ -38,6 +38,7 @@ public class RowBloomContext extends BloomContext {
     super(bloomFilterWriter, comparator);
   }
 
+  @Override
   public void addLastBloomKey(Writer writer) throws IOException {
     if (this.getLastCell() != null) {
       byte[] key = CellUtil.copyRow(this.getLastCell());

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
index 0464b7c..e731bd7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
@@ -52,10 +52,12 @@ abstract public class ShutdownHookManager {
 
   private static class ShutdownHookManagerV1 extends ShutdownHookManager {
     // priority is ignored in hadoop versions earlier than 2.0
-    public void addShutdownHook(Thread shutdownHookThread, int priority) {
+    @Override
+    public void addShutdownHook(Thread shutdownHookThread, int priority) {
       Runtime.getRuntime().addShutdownHook(shutdownHookThread);
     }
     
+    @Override
     public boolean removeShutdownHook(Runnable shutdownHook) {
       Thread shutdownHookThread = null;
       if (!(shutdownHook instanceof Thread)) {
@@ -67,6 +69,7 @@ abstract public class ShutdownHookManager {
   }
 
   private static class ShutdownHookManagerV2 extends ShutdownHookManager {
+    @Override
     public void addShutdownHook(Thread shutdownHookThread, int priority) {
       try {
         Methods.call(shutdownHookManagerClass, 
@@ -79,6 +82,7 @@ abstract public class ShutdownHookManager {
       }
     }
     
+    @Override
     public boolean removeShutdownHook(Runnable shutdownHook) {
       try {
         return (Boolean)

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index 74d502e..d9badfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 
@@ -455,7 +456,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
       } catch (FileNotFoundException fnfe) {
         // If the log was archived, continue reading from there
        Path archivedLog = AbstractFSWALProvider.getArchivedLogPath(path, conf);
-        if (path != archivedLog) {
+        if (!Objects.equals(path, archivedLog)) {
           return openReader(archivedLog, conf);
         } else {
           throw fnfe;

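The openReader change compares Paths by value rather than by reference: getArchivedLogPath may hand back a distinct Path object that is equal to the input, and != only detects object identity. java.util.Objects.equals adds null safety on top of equals(). A sketch using Hadoop's Path (paths illustrative):

    import java.util.Objects;
    import org.apache.hadoop.fs.Path;

    public class PathEqualityDemo {
      public static void main(String[] args) {
        Path original = new Path("/hbase/oldWALs/wal.1");
        Path archived = new Path("/hbase/oldWALs/wal.1"); // equal value, distinct object
        System.out.println(original != archived);                 // true: reference check misfires
        System.out.println(!Objects.equals(original, archived));  // false: value comparison
      }
    }
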
http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 725f9ff..2105490 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -185,7 +185,7 @@ class DisabledWALProvider implements WALProvider {
     public void sync() {
       if (!this.listeners.isEmpty()) {
         for (WALActionsListener listener : this.listeners) {
-          listener.postSync(0l, 0);
+          listener.postSync(0L, 0);
         }
       }
     }
@@ -195,6 +195,7 @@ class DisabledWALProvider implements WALProvider {
       sync();
     }
 
+    @Override
+    public Long startCacheFlush(final byte[] encodedRegionName, Map<byte[], Long>
         flushedFamilyNamesToSeq) {
      return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet());

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
index 14505a8..f1662bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
@@ -63,7 +63,7 @@ public class FSHLogProvider extends AbstractFSWALProvider<FSHLog> {
       ProtobufLogWriter.class, Writer.class);
     Writer writer = null;
     try {
-      writer = logWriterClass.newInstance();
+      writer = logWriterClass.getDeclaredConstructor().newInstance();
       writer.init(fs, path, conf, overwritable);
       return writer;
     } catch (Exception e) { 

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index a3e54a5..a0ef817 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -104,19 +104,14 @@ public class RegionGroupingProvider implements WALProvider {
     }
     LOG.info("Instantiating RegionGroupingStrategy of type " + clazz);
     try {
-      final RegionGroupingStrategy result = clazz.newInstance();
+      final RegionGroupingStrategy result = clazz.getDeclaredConstructor().newInstance();
       result.init(conf, providerId);
       return result;
-    } catch (InstantiationException exception) {
+    } catch (Exception e) {
       LOG.error("couldn't set up region grouping strategy, check config key " +
           REGION_GROUPING_STRATEGY);
-      LOG.debug("Exception details for failure to load region grouping strategy.", exception);
-      throw new IOException("couldn't set up region grouping strategy", exception);
-    } catch (IllegalAccessException exception) {
-      LOG.error("couldn't set up region grouping strategy, check config key " +
-          REGION_GROUPING_STRATEGY);
-      LOG.debug("Exception details for failure to load region grouping strategy.", exception);
-      throw new IOException("couldn't set up region grouping strategy", exception);
+      LOG.debug("Exception details for failure to load region grouping strategy.", e);
+      throw new IOException("couldn't set up region grouping strategy", e);
     }
   }
 
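Collapsing the two catch blocks above is safe because their bodies were identical. Where widening all the way to Exception is undesirable, Java 7 multi-catch covers the reflective call's checked exceptions explicitly; a sketch under that assumption (method name illustrative):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;

    public class MultiCatchDemo {
      static Object instantiate(Class<?> clazz) throws IOException {
        try {
          return clazz.getDeclaredConstructor().newInstance();
        } catch (NoSuchMethodException | InstantiationException
            | IllegalAccessException | InvocationTargetException e) {
          // One handler for all four checked exceptions, no blanket catch (Exception).
          throw new IOException("couldn't instantiate " + clazz.getName(), e);
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(instantiate(java.util.ArrayList.class)); // prints []
      }
    }
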

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
index 4008bb0..d478e4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
@@ -99,6 +99,7 @@ public interface WAL extends Closeable, WALFileLengthProvider {
   * underlying resources after this call; i.e. filesystem based WALs can archive or
    * delete files.
    */
+  @Override
   void close() throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index d70b8cd..d59c824 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -75,7 +75,7 @@ public class WALFactory implements WALFileLengthProvider {
     multiwal(RegionGroupingProvider.class),
     asyncfs(AsyncFSWALProvider.class);
 
-    Class<? extends WALProvider> clazz;
+    final Class<? extends WALProvider> clazz;
     Providers(Class<? extends WALProvider> clazz) {
       this.clazz = clazz;
     }
@@ -139,17 +139,13 @@ public class WALFactory implements WALFileLengthProvider {
      List<WALActionsListener> listeners, String providerId) throws IOException {
     LOG.info("Instantiating WALProvider of type " + clazz);
     try {
-      final WALProvider result = clazz.newInstance();
+      final WALProvider result = clazz.getDeclaredConstructor().newInstance();
       result.init(this, conf, listeners, providerId);
       return result;
-    } catch (InstantiationException exception) {
-      LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
-      LOG.debug("Exception details for failure to load WALProvider.", exception);
-      throw new IOException("couldn't set up WALProvider", exception);
-    } catch (IllegalAccessException exception) {
+    } catch (Exception e) {
      LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
-      LOG.debug("Exception details for failure to load WALProvider.", exception);
-      throw new IOException("couldn't set up WALProvider", exception);
+      LOG.debug("Exception details for failure to load WALProvider.", e);
+      throw new IOException("couldn't set up WALProvider", e);
     }
   }
 
@@ -294,7 +290,7 @@ public class WALFactory implements WALFileLengthProvider {
       AbstractFSWALProvider.Reader reader = null;
       while (true) {
         try {
-          reader = lrClass.newInstance();
+          reader = lrClass.getDeclaredConstructor().newInstance();
           reader.init(fs, path, conf, null);
           return reader;
         } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
index 983fae9..0a5acda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java
@@ -470,8 +470,8 @@ public class WALKeyImpl implements WALKey {
   @Override
   public int hashCode() {
     int result = Bytes.hashCode(this.encodedRegionName);
-    result ^= getSequenceId();
-    result ^= this.writeTime;
+    result = (int) (result ^ getSequenceId());
+    result = (int) (result ^ this.writeTime);
     return result;
   }
 
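The hashCode fix makes the narrowing explicit: `result ^= getSequenceId()` compiles only because compound assignment inserts a hidden (int) cast, which silently drops the high 32 bits of the long. When both halves should contribute, Long.hashCode folds them together. A sketch (values illustrative):

    public class LongHashDemo {
      public static void main(String[] args) {
        long seqId = 0x1234_5678_9ABC_DEF0L;
        int hash = 17;
        // Equivalent to the fixed line: only the low 32 bits of seqId take part.
        hash = (int) (hash ^ seqId);
        // Long.hashCode(v) is (int) (v ^ (v >>> 32)): both halves contribute.
        int folded = 17 ^ Long.hashCode(seqId);
        System.out.println("truncating=" + hash + " folded=" + folded);
      }
    }
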

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 2aad203..0d73045 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1085,7 +1085,7 @@ public class WALSplitter {
     protected EntryBuffers entryBuffers;
 
     protected ConcurrentHashMap<String, SinkWriter> writers = new ConcurrentHashMap<>();
-    protected ConcurrentHashMap<String, Long> regionMaximumEditLogSeqNum =
+    protected final ConcurrentHashMap<String, Long> regionMaximumEditLogSeqNum =
         new ConcurrentHashMap<>();
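
Marking the map final here (like Providers.clazz earlier in this patch)
documents that the field is assigned exactly once, and the Java memory model
guarantees a final field initialized in the constructor is safely published
to other threads without extra synchronization. An illustrative sketch with
invented names, not part of the patch:

    import java.util.concurrent.ConcurrentHashMap;

    public class FinalFieldDemo {
      // Final reference: assigned once, safely visible to all threads.
      private final ConcurrentHashMap<String, Long> regionMaxSeqNum =
          new ConcurrentHashMap<>();

      public void record(String region, long seqNum) {
        // Keeps the largest sequence id seen for each region.
        regionMaxSeqNum.merge(region, seqNum, Math::max);
      }
    }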
 
 
@@ -1642,8 +1642,10 @@ public class WALSplitter {
         List<IOException> thrown, List<Path> paths)
         throws InterruptedException, ExecutionException {
       for (final Map.Entry<byte[], RegionEntryBuffer> buffer : entryBuffers.buffers.entrySet()) {
-        LOG.info("Submitting writeThenClose of {}", buffer.getValue().encodedRegionName);
+        LOG.info("Submitting writeThenClose of {}",
+            Arrays.toString(buffer.getValue().encodedRegionName));
         completionService.submit(new Callable<Void>() {
+          @Override
           public Void call() throws Exception {
             Path dst = writeThenClose(buffer.getValue());
             paths.add(dst);
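
The Arrays.toString change above matters because Java arrays do not override
toString(): logging a byte[] directly prints its type and identity hash
rather than its contents. A quick sketch, not from the patch:

    import java.util.Arrays;

    public class ByteArrayLoggingDemo {
      public static void main(String[] args) {
        byte[] encodedRegionName = {97, 98, 99};
        System.out.println(encodedRegionName);                  // e.g. "[B@1b6d3586"
        System.out.println(Arrays.toString(encodedRegionName)); // "[97, 98, 99]"
      }
    }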

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
index aec4bbd..44d3e87 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java
@@ -151,6 +151,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
       table = connection.getTable(TABLE_NAME);
     }
 
+    @Override
     public void doAnAction() throws Exception {
       // Pick a random row to write into
       byte[] targetRow = targetRows[rand.nextInt(targetRows.length)];
@@ -197,6 +198,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
       table = connection.getTable(TABLE_NAME);
     }
 
+    @Override
     public void doAnAction() throws Exception {
       Get g = new Get(targetRow);
       Result res = table.get(g);
@@ -264,6 +266,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
       table = connection.getTable(TABLE_NAME);
     }
 
+    @Override
     public void doAnAction() throws Exception {
       Scan s = new Scan();
       for (byte[] family : targetFamilies) {
@@ -344,6 +347,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
     }
     // Add a flusher
     ctx.addThread(new RepeatingTestThread(ctx) {
+      @Override
       public void doAnAction() throws Exception {
         try {
           admin.flush(TABLE_NAME);
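
The @Override annotations added throughout this patch are compile-time
checks only, but they catch signature drift: if a superclass method is later
renamed or its parameters change, an annotated "override" fails to compile
instead of silently becoming an unrelated method. A sketch with hypothetical
classes, not from the patch:

    public class OverrideDemo {
      static class Base {
        void doAnAction() throws Exception {
        }
      }

      static class Derived extends Base {
        @Override // misspelling this method's name would now be a compile error
        void doAnAction() throws Exception {
        }
      }
    }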

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 92581b8..5f2ffb2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.NavigableMap;
 
 import org.apache.hadoop.conf.Configuration;
@@ -383,7 +384,7 @@ public abstract class HBaseTestCase extends TestCase {
         if (res_value != null) {
           assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
               " at timestamp " +
-              timestamp, value, new String(res_value));
+              timestamp, value, new String(res_value, StandardCharsets.UTF_8));
         }
       }
     }
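
The StandardCharsets change above removes a platform dependency: the
one-argument new String(byte[]) decodes with the JVM's default charset, so
the same bytes can produce different strings on differently configured
machines. A minimal sketch, not part of the patch:

    import java.nio.charset.StandardCharsets;

    public class CharsetDemo {
      public static void main(String[] args) {
        byte[] raw = {(byte) 0xC3, (byte) 0xA9};    // UTF-8 encoding of 'é'
        String platformDependent = new String(raw); // depends on file.encoding
        String deterministic = new String(raw, StandardCharsets.UTF_8); // always "é"
        System.out.println(deterministic);
      }
    }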

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index a686e33..75abd5e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -33,6 +33,7 @@ import java.net.InetAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -1596,7 +1597,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       if (status.getSecond() != 0) {
         LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
           + " regions updated.");
-        Thread.sleep(1 * 1000l);
+        Thread.sleep(1 * 1000L);
       } else {
         LOG.debug("All regions updated.");
         break;
@@ -1983,7 +1984,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
               expectedCount = 1;
             }
             if (count != expectedCount) {
-              String row = new String(new byte[] {b1,b2,b3});
+              String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8);
               throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
                   "instead of " + expectedCount);
             }
@@ -2079,7 +2080,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       get.setConsistency(Consistency.TIMELINE);
       Result result = table.get(get);
       assertTrue(failMsg, result.containsColumn(f, null));
-      assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
+      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
       Cell cell = result.getColumnLatestCell(f, null);
       assertTrue(failMsg,
         Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
@@ -2114,7 +2115,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       if (!present) continue;
 
       assertTrue(failMsg, result.containsColumn(f, null));
-      assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
+      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
       Cell cell = result.getColumnLatestCell(f, null);
       assertTrue(failMsg,
         Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
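
Two recurring cleanups in this file: the 1000L literal replaces 1000l, whose
lowercase suffix is easily misread as the digit 1, and assertEquals
arguments are swapped into JUnit's (expected, actual) order so failure
messages describe what actually happened. A sketch of the latter, not from
the patch:

    import static org.junit.Assert.assertEquals;

    public class AssertOrderDemo {
      public static void main(String[] args) {
        int actualSize = 1;
        // JUnit's contract is assertEquals(expected, actual). If actualSize
        // were 2, this would fail with "expected:<1> but was:<2>"; with the
        // arguments swapped it would claim "expected:<2> but was:<1>",
        // inverting the real situation.
        assertEquals(1, actualSize);
      }
    }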

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 378f6ec..9959e31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -165,7 +165,8 @@ public class MiniHBaseCluster extends HBaseCluster {
     @Override
     public void run() {
       try {
-        this.user.runAs(new PrivilegedAction<Object>(){
+        this.user.runAs(new PrivilegedAction<Object>() {
+          @Override
           public Object run() {
             runRegionServer();
             return null;
@@ -195,6 +196,7 @@ public class MiniHBaseCluster extends HBaseCluster {
     @Override
     public void abort(final String reason, final Throwable cause) {
       this.user.runAs(new PrivilegedAction<Object>() {
+        @Override
         public Object run() {
           abortRegionServer(reason, cause);
           return null;
@@ -497,6 +499,7 @@ public class MiniHBaseCluster extends HBaseCluster {
    * Returns the current active master, if available.
    * @return the active HMaster, null if none is active.
    */
+  @Override
   public MasterService.BlockingInterface getMasterAdminService() {
     return this.hbaseCluster.getActiveMaster().getMasterRpcServices();
   }
@@ -588,6 +591,7 @@ public class MiniHBaseCluster extends HBaseCluster {
    *         masters left.
    * @throws InterruptedException
    */
+  @Override
   public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
     List<JVMClusterUtil.MasterThread> mts;
     long start = System.currentTimeMillis();
@@ -628,6 +632,7 @@ public class MiniHBaseCluster extends HBaseCluster {
   /**
    * Shut down the mini HBase cluster
    */
+  @Override
   public void shutdown() throws IOException {
     if (this.hbaseCluster != null) {
       this.hbaseCluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
index 1d8de45..86ac2f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
@@ -119,6 +119,7 @@ public abstract class MultithreadedTestUtil {
       this.ctx = ctx;
     }
 
+    @Override
     public void run() {
       try {
         doWork();
@@ -143,6 +144,7 @@ public abstract class MultithreadedTestUtil {
       super(ctx);
     }
 
+    @Override
     public final void doWork() throws Exception {
       try {
         while (ctx.shouldRun() && !stopped) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
index 06cfdcf..f21d79d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
@@ -49,6 +49,7 @@ public class TestHDFSBlocksDistribution {
   }
 
   public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution {
+    @Override
     public Map<String,HostAndWeight> getHostAndWeights() {
       HashMap<String, HostAndWeight> map = new HashMap<>();
       map.put("test", new HostAndWeight(null, 100));

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index c5cda27..1210361 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -93,7 +94,7 @@ public class TestIOFencing {
   }
 
   public abstract static class CompactionBlockerRegion extends HRegion {
-    volatile int compactCount = 0;
+    AtomicInteger compactCount = new AtomicInteger();
     volatile CountDownLatch compactionsBlocked = new CountDownLatch(0);
     volatile CountDownLatch compactionsWaiting = new CountDownLatch(0);
 
@@ -129,7 +130,7 @@ public class TestIOFencing {
       try {
         return super.compact(compaction, store, throughputController);
       } finally {
-        compactCount++;
+        compactCount.getAndIncrement();
       }
     }
 
@@ -139,7 +140,7 @@ public class TestIOFencing {
       try {
         return super.compact(compaction, store, throughputController, user);
       } finally {
-        compactCount++;
+        compactCount.getAndIncrement();
       }
     }
 
@@ -336,7 +337,7 @@ public class TestIOFencing {
       }
       LOG.info("Allowing compaction to proceed");
       compactingRegion.allowCompactions();
-      while (compactingRegion.compactCount == 0) {
+      while (compactingRegion.compactCount.get() == 0) {
         Thread.sleep(1000);
       }
       // The server we killed stays up until the compaction that was started before it was killed
@@ -349,7 +350,7 @@ public class TestIOFencing {
         FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
       admin.majorCompact(TABLE_NAME);
       startWaitTime = System.currentTimeMillis();
-      while (newRegion.compactCount == 0) {
+      while (newRegion.compactCount.get() == 0) {
         Thread.sleep(1000);
         assertTrue("New region never compacted",
           System.currentTimeMillis() - startWaitTime < 180000);
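
The compactCount change above reflects a general rule: volatile guarantees
visibility, not atomicity, so a concurrent counter++ on a volatile int is a
three-step read-modify-write that can lose updates. AtomicInteger makes the
increment indivisible. A standalone sketch, not from the patch, that usually
exhibits the lost updates:

    import java.util.concurrent.atomic.AtomicInteger;

    public class CounterDemo {
      static volatile int volatileCount = 0;
      static final AtomicInteger atomicCount = new AtomicInteger();

      public static void main(String[] args) throws InterruptedException {
        Runnable work = () -> {
          for (int i = 0; i < 100_000; i++) {
            volatileCount++;               // racy: increments can be lost
            atomicCount.getAndIncrement(); // atomic: never loses an update
          }
        };
        Thread t1 = new Thread(work);
        Thread t2 = new Thread(work);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // volatileCount is typically below 200000; atomicCount.get() is exact.
        System.out.println(volatileCount + " vs " + atomicCount.get());
      }
    }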

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
index 7b6c5a5..620abef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
@@ -163,6 +163,7 @@ public class TestMetaTableAccessorNoCluster {
           .thenThrow(new ServiceException("Server not running (2 of 3)"))
           .thenThrow(new ServiceException("Server not running (3 of 3)"))
           .thenAnswer(new Answer<ScanResponse>() {
+            @Override
             public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
               ((HBaseRpcController) invocation.getArgument(0)).setCellScanner(CellUtil
                   .createCellScanner(cellScannables));

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
index 71492b1..63d2cc2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
@@ -127,14 +127,14 @@ public class TestMetaTableLocator {
       assertEquals(state, MetaTableLocator.getMetaRegionState(this.watcher).getState());
     }
     MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
-    assertEquals(mtl.getMetaRegionLocation(this.watcher), SN);
+    assertEquals(SN, mtl.getMetaRegionLocation(this.watcher));
     assertEquals(RegionState.State.OPEN,
       MetaTableLocator.getMetaRegionState(this.watcher).getState());
 
     mtl.deleteMetaLocation(this.watcher);
     assertNull(MetaTableLocator.getMetaRegionState(this.watcher).getServerName());
-    assertEquals(MetaTableLocator.getMetaRegionState(this.watcher).getState(),
-      RegionState.State.OFFLINE);
+    assertEquals(RegionState.State.OFFLINE,
+        MetaTableLocator.getMetaRegionState(this.watcher).getState());
     assertNull(mtl.getMetaRegionLocation(this.watcher));
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java
index bdb74a4..24a8830 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java
@@ -48,6 +48,7 @@ import java.io.IOException;
       super(conf);
     }
 
+    @Override
     protected int movedRegionCleanerPeriod() {
       return 500;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
index 1a0215e..acf7861 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
@@ -101,6 +101,7 @@ public class TestMultiVersions {
     // TODO: Remove these deprecated classes or pull them in here if this is
     // only test using them.
     TimestampTestBase.doTestDelete(table, new FlushCache() {
+      @Override
       public void flushcache() throws IOException {
         UTIL.getHBaseCluster().flushcache();
       }
@@ -109,6 +110,7 @@ public class TestMultiVersions {
     // Perhaps drop and readd the table between tests so the former does
     // not pollute this latter?  Or put into separate tests.
     TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {
+      @Override
       public void flushcache() throws IOException {
         UTIL.getMiniHBaseCluster().flushcache();
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
index c1b5dac..cfc5c2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
@@ -94,9 +94,8 @@ public class TestServerName {
       ServerName.valueOf("www.example.org", 1234, 5678).toString());
     assertEquals(sn.toString(),
       ServerName.valueOf("www.example.org:1234", 5678).toString());
-    assertEquals(sn.toString(),
-      "www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234" +
-      ServerName.SERVERNAME_SEPARATOR + "5678");
+    assertEquals("www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234"
+        + ServerName.SERVERNAME_SEPARATOR + "5678", sn.toString());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
index 2639821..4aeedb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java
@@ -205,7 +205,8 @@ public class TestServerSideScanMetricsFromClientSide {
     }
 
     // The filter should filter out all rows, but we still expect to see every row.
-    Filter filter = new RowFilter(CompareOperator.EQUAL, new BinaryComparator("xyz".getBytes()));
+    Filter filter =
+        new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("xyz")));
     scan = new Scan(baseScan);
     scan.setFilter(filter);
     testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME, ROWS.length);
@@ -255,7 +256,8 @@ public class TestServerSideScanMetricsFromClientSide {
     testRowsFilteredMetric(baseScan, null, 0);
 
     // Row filter doesn't match any row key. All rows should be filtered
-    Filter filter = new RowFilter(CompareOperator.EQUAL, new BinaryComparator("xyz".getBytes()));
+    Filter filter =
+        new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("xyz")));
     testRowsFilteredMetric(baseScan, filter, ROWS.length);
 
     // Filter will return results containing only the first key. Number of entire rows filtered
@@ -269,7 +271,7 @@ public class TestServerSideScanMetricsFromClientSide {
     testRowsFilteredMetric(baseScan, filter, 0);
 
     // Column prefix will NOT find any matching qualifier on any row. All rows should be filtered
-    filter = new ColumnPrefixFilter("xyz".getBytes());
+    filter = new ColumnPrefixFilter(Bytes.toBytes("xyz"));
     testRowsFilteredMetric(baseScan, filter, ROWS.length);
 
     // Matching column value should exist in each row. No rows should be filtered.
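
The Bytes.toBytes conversions here (and in the TestAdmin2 and
TestAsyncProcedureAdminApi changes below) fix the encoding-side twin of the
StandardCharsets changes earlier: the no-argument String.getBytes() encodes
with the platform default charset, while HBase's Bytes.toBytes(String)
always encodes UTF-8, keeping keys and values byte-identical across JVMs. A
sketch of the difference, not from the patch:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class EncodingDemo {
      public static void main(String[] args) {
        byte[] platformDependent = "xyz".getBytes(); // default charset
        // org.apache.hadoop.hbase.util.Bytes.toBytes(String) is equivalent to:
        byte[] deterministic = "xyz".getBytes(StandardCharsets.UTF_8);
        // Equal for ASCII text on most platforms, but not guaranteed in general.
        System.out.println(Arrays.equals(platformDependent, deterministic));
      }
    }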

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index fba429a..f81a36d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -421,7 +421,7 @@ public class TestAdmin1 {
     this.admin.createTable(htd);
     Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
     TableDescriptor confirmedHtd = table.getDescriptor();
-    assertEquals(TableDescriptor.COMPARATOR.compare(htd, confirmedHtd), 0);
+    assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd));
     MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
     table.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 05b8edc..57bd158 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -303,7 +303,7 @@ public class TestAdmin2 {
     TableName tableName = TableName
         .valueOf("testTableNotFoundExceptionWithoutAnyTables");
     Table ht = TEST_UTIL.getConnection().getTable(tableName);
-    ht.get(new Get("e".getBytes()));
+    ht.get(new Get(Bytes.toBytes("e")));
   }
 
   @Test (timeout=300000)
@@ -582,8 +582,9 @@ public class TestAdmin2 {
     }
     // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
     // actually getting disabled by the disableTable() call.
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName().getBytes()));
+    HTableDescriptor htd =
+        new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName())));
+    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1"));
     htd.addFamily(hcd);
     TEST_UTIL.getHBaseAdmin().createTable(htd);
   }
@@ -695,13 +696,13 @@ public class TestAdmin2 {
     assertTrue(decommissionedRegionServers.isEmpty());
 
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    TEST_UTIL.createMultiRegionTable(tableName, "f".getBytes(), 6);
+    TEST_UTIL.createMultiRegionTable(tableName, Bytes.toBytes("f"), 6);
 
     ArrayList<ServerName> clusterRegionServers =
         new ArrayList<>(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
           .getLiveServerMetrics().keySet());
 
-    assertEquals(clusterRegionServers.size(), 3);
+    assertEquals(3, clusterRegionServers.size());
 
     HashMap<ServerName, List<RegionInfo>> serversToDecommssion = new HashMap<>();
     // Get a server that has regions. We will decommission two of the servers,

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java
index ab8ebb5..3344c4b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java
@@ -56,6 +56,7 @@ public class TestAsyncClusterAdminApi2 extends TestAsyncAdminBase {
   }
 
   @Before
+  @Override
   public void setUp() throws Exception {
     TEST_UTIL.startMiniCluster(1, 3);
     ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
@@ -63,6 +64,7 @@ public class TestAsyncClusterAdminApi2 extends TestAsyncAdminBase {
   }
 
   @After
+  @Override
   public void tearDown() throws Exception {
     IOUtils.closeQuietly(ASYNC_CONN);
     TEST_UTIL.shutdownMiniCluster();

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
index 8c2b060..8968b39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java
@@ -49,7 +49,7 @@ public class TestAsyncDecommissionAdminApi extends TestAsyncAdminBase {
         new ArrayList<>(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get()
           .getLiveServerMetrics().keySet());
 
-    assertEquals(clusterRegionServers.size(), 2);
+    assertEquals(2, clusterRegionServers.size());
 
     HashMap<ServerName, List<RegionInfo>> serversToDecommssion = new HashMap<>();
     // Get a server that has regions. We will decommission one of the servers,

http://git-wip-us.apache.org/repos/asf/hbase/blob/93a182f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java
index 7a2c00f..d50e039 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java
@@ -90,7 +90,7 @@ public class TestAsyncProcedureAdminApi extends TestAsyncAdminBase {
     byte[] result = admin.execProcedureWithReturn(SimpleMasterProcedureManager.SIMPLE_SIGNATURE,
       "myTest2", new HashMap<>()).get();
     assertArrayEquals("Incorrect return data from execProcedure",
-      SimpleMasterProcedureManager.SIMPLE_DATA.getBytes(), result);
+      Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA), result);
   }
 
   @Test
