HBASE-18308 Eliminate the findbugs warnings for hbase-server

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2da5b432
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2da5b432
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2da5b432

Branch: refs/heads/HBASE-15631-branch-1
Commit: 2da5b432a18fac0438346a2bb0ccea3a0beb90fe
Parents: 9802095
Author: Chia-Ping Tsai <chia7...@gmail.com>
Authored: Thu Jul 20 00:36:16 2017 +0800
Committer: Chia-Ping Tsai <chia7...@gmail.com>
Committed: Thu Jul 20 00:36:16 2017 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/LocalHBaseCluster.java  | 10 ++++----
 .../hadoop/hbase/constraint/Constraints.java    |  4 ++--
 .../hadoop/hbase/mapreduce/JarFinder.java       |  3 +++
 .../apache/hadoop/hbase/master/DeadServer.java  |  5 ----
 .../hadoop/hbase/master/ServerManager.java      |  5 ++--
 .../hbase/master/balancer/BaseLoadBalancer.java |  2 ++
 .../hadoop/hbase/regionserver/HRegion.java      |  3 ---
 .../hbase/regionserver/HRegionServer.java       |  3 ++-
 .../querymatcher/ExplicitColumnTracker.java     | 14 +++++-------
 .../regionserver/ReplicationSource.java         |  2 +-
 .../org/apache/hadoop/hbase/tool/Canary.java    |  5 ++--
 .../apache/hadoop/hbase/wal/WALSplitter.java    | 24 ++++++++++++--------
 12 files changed, 40 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b98078a..42484e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -313,12 +313,10 @@ public class LocalHBaseCluster {
    */
   public HMaster getActiveMaster() {
     for (JVMClusterUtil.MasterThread mt : masterThreads) {
-      if (mt.getMaster().isActiveMaster()) {
-        // Ensure that the current active master is not stopped.
-        // We don't want to return a stopping master as an active master.
-        if (mt.getMaster().isActiveMaster()  && !mt.getMaster().isStopped()) {
-          return mt.getMaster();
-        }
+      // Ensure that the current active master is not stopped.
+      // We don't want to return a stopping master as an active master.
+      if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+        return mt.getMaster();
       }
     }
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 85ef717..c96bf3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -616,8 +616,8 @@ public final class Constraints {
     @Override
     public int compare(Constraint c1, Constraint c2) {
      // compare the priorities of the constraints stored in their configuration
-      return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-          .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+      return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+        c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
     }
   };
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
index dfbe648..e0421d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
                              boolean start) throws IOException {
     String[] dirList = dir.list();
+    if (dirList == null) {
+      return;
+    }
     for (String aDirList : dirList) {
       File f = new File(dir, aDirList);
       if (!f.isHidden()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index c33cdcc..81accd2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -134,11 +134,6 @@ public class DeadServer {
 
    assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";
 
-    if (numProcessing < 0) {
-      LOG.error("Number of dead servers in processing = " + numProcessing
-          + ". Something went wrong, this should always be non-negative.");
-      numProcessing = 0;
-    }
     if (numProcessing == 0) { processing = false; }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 3a2926c..93e532b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -709,9 +709,8 @@ public class ServerManager {
     if (!services.getAssignmentManager().isFailoverCleanupDone()) {
       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
     }
-
-    for(ServerName tmpServerName : requeuedDeadServers.keySet()){
-      processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
+    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
+      processDeadServer(entry.getKey(), entry.getValue());
     }
     requeuedDeadServers.clear();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index b6b317d..3bb12ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1096,6 +1096,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   protected MetricsBalancer metricsBalancer = null;
   protected ClusterStatus clusterStatus = null;
   protected ServerName masterServerName;
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+  justification="The services is just assigned once when master start")
   protected MasterServices services;
 
   protected static String[] getTablesOnMaster(Configuration conf) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index fc737a9..fe2a16a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2816,7 +2816,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     checkResources();
     startRegionOperation(Operation.DELETE);
     try {
-      delete.getRow();
      // All edits for the given row (across all column families) must happen atomically.
       doBatchMutate(delete);
     } finally {
@@ -3148,7 +3147,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }
 
-  @SuppressWarnings("unchecked")
   private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp) throws IOException {
     boolean isInReplay = batchOp.isInReplay();
     // variable to note if all Put items are for the same CF -- metrics related
@@ -3402,7 +3400,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        // They don't have to be, it will still work, just write more WALEdits than needed.
         if (nonceGroup != currentNonceGroup || nonce != currentNonce) {
           if (walEdit.size() > 0) {
-            assert isInReplay;
             if (!isInReplay) {
            throw new IOException("Multiple nonces per batch and not in replay");
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 849eba4..107e1c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -816,7 +816,8 @@ public class HRegionServer extends HasThread implements
    * @throws IOException
    * @throws InterruptedException
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE",
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+    value={"RV_RETURN_VALUE_IGNORED_BAD_PRACTICE", "RV_RETURN_VALUE_IGNORED"},
     justification="cluster Id znode read would give us correct response")
   private void initializeZooKeeper() throws IOException, InterruptedException {
     // Create the master address tracker, register with zk, and start it.  Then

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
index b206055..2ec3483 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
@@ -137,15 +137,13 @@ public class ExplicitColumnTracker implements ColumnTracker {
       // is interested in. That means there is no more data for the column
       // of interest. Advance the ExplicitColumnTracker state to next
       // column of interest, and check again.
-      if (ret <= -1) {
-        ++this.index;
-        if (done()) {
-          // No more to match, do not include, done with this row.
-          return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
-        }
-        // This is the recursive case.
-        this.column = this.columns[this.index];
+      ++this.index;
+      if (done()) {
+        // No more to match, do not include, done with this row.
+        return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
       }
+      // This is the recursive case.
+      this.column = this.columns[this.index];
     } while (true);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 8378b9b..3278f0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -461,7 +461,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
 
     @Override
     public int compare(Path o1, Path o2) {
-      return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
+      return Long.compare(getTS(o1), getTS(o2));
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 92c47c7..60c2079 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -1077,10 +1077,11 @@ public final class Canary implements Tool {
             }
           }
          Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-          for (String tableName : this.configuredReadTableTimeouts.keySet()) {
+          for (Map.Entry<String, Long> entry : configuredReadTableTimeouts.entrySet()) {
+            String tableName = entry.getKey();
             if (actualReadTableLatency.containsKey(tableName)) {
               Long actual = actualReadTableLatency.get(tableName).longValue();
-              Long configured = this.configuredReadTableTimeouts.get(tableName);
+              Long configured = entry.getValue();
               LOG.info("Read operation for " + tableName + " took " + actual +
                 " ms. The configured read timeout was " + configured + " ms.");
               if (actual > configured) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 293820b..005e948 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1769,12 +1769,13 @@ public class WALSplitter {
       int maxSize = 0;
       List<Pair<HRegionLocation, Entry>> maxQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String key : this.serverToBufferQueueMap.keySet()) {
-          List<Pair<HRegionLocation, Entry>> curQueue = this.serverToBufferQueueMap.get(key);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry:
+              serverToBufferQueueMap.entrySet()) {
+          List<Pair<HRegionLocation, Entry>> curQueue = entry.getValue();
           if (curQueue.size() > maxSize) {
             maxSize = curQueue.size();
             maxQueue = curQueue;
-            maxLocKey = key;
+            maxLocKey = entry.getKey();
           }
         }
         if (maxSize < minBatchSize
@@ -2065,8 +2066,10 @@ public class WALSplitter {
       int curSize = 0;
       List<Pair<HRegionLocation, Entry>> curQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String locationKey : this.serverToBufferQueueMap.keySet()) {
-          curQueue = this.serverToBufferQueueMap.get(locationKey);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+                serverToBufferQueueMap.entrySet()) {
+          String locationKey = entry.getKey();
+          curQueue = entry.getValue();
           if (!curQueue.isEmpty()) {
             curSize = curQueue.size();
             curLoc = locationKey;
@@ -2144,8 +2147,9 @@ public class WALSplitter {
           }
         } finally {
           synchronized (writers) {
-            for (String locationKey : writers.keySet()) {
-              RegionServerWriter tmpW = writers.get(locationKey);
-            for (Map.Entry<String, RegionServerWriter> entry : writers.entrySet()) {
+              String locationKey = entry.getKey();
+              RegionServerWriter tmpW = entry.getValue();
               try {
                 tmpW.close();
               } catch (IOException ioe) {
@@ -2157,8 +2161,10 @@ public class WALSplitter {
 
           // close connections
           synchronized (this.tableNameToHConnectionMap) {
-            for (TableName tableName : this.tableNameToHConnectionMap.keySet()) {
-              HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
+            for (Map.Entry<TableName, HConnection> entry :
+                  tableNameToHConnectionMap.entrySet()) {
+              TableName tableName = entry.getKey();
+              HConnection hconn = entry.getValue();
               try {
                 hconn.clearRegionCache();
                 hconn.close();

Reply via email to