Repository: hbase
Updated Branches:
  refs/heads/master d0e4a643a -> 3574757f7
HBASE-18308 Eliminate the findbugs warnings for hbase-server

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3574757f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3574757f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3574757f

Branch: refs/heads/master
Commit: 3574757f74762ba7ba563595d1cda3314312ef8f
Parents: d0e4a64
Author: Chia-Ping Tsai <chia7...@gmail.com>
Authored: Thu Jul 20 00:35:07 2017 +0800
Committer: Chia-Ping Tsai <chia7...@gmail.com>
Committed: Thu Jul 20 00:35:07 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/LocalHBaseCluster.java   | 10 ++++------
 .../org/apache/hadoop/hbase/constraint/Constraints.java   |  4 ++--
 .../hbase/coordination/ZkSplitLogWorkerCoordination.java  |  3 ++-
 .../java/org/apache/hadoop/hbase/mapreduce/JarFinder.java |  3 +++
 .../hadoop/hbase/mapreduce/LoadIncrementalHFiles.java     |  6 ------
 .../java/org/apache/hadoop/hbase/master/DeadServer.java   |  5 -----
 .../org/apache/hadoop/hbase/regionserver/HRegion.java     |  4 ----
 .../hbase/replication/regionserver/ReplicationSource.java |  2 +-
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java    |  5 +++--
 9 files changed, 15 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b04e685..2dad81a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -309,12 +309,10 @@ public class LocalHBaseCluster {
    */
   public HMaster getActiveMaster() {
     for (JVMClusterUtil.MasterThread mt : masterThreads) {
-      if (mt.getMaster().isActiveMaster()) {
-        // Ensure that the current active master is not stopped.
-        // We don't want to return a stopping master as an active master.
-        if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
-          return mt.getMaster();
-        }
+      // Ensure that the current active master is not stopped.
+      // We don't want to return a stopping master as an active master.
+      if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+        return mt.getMaster();
       }
     }
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 203442a..c6c3688 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -614,8 +614,8 @@ public final class Constraints {
     @Override
     public int compare(Constraint c1, Constraint c2) {
       // compare the priorities of the constraints stored in their configuration
-      return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-          .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+      return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+          c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
     }
   };

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 70445bd..e4fffa1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -572,7 +573,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
         getDataSetWatchFailure(path);
         return;
       }
-      data = watcher.getRecoverableZooKeeper().removeMetaData(data);
+      data = RecoverableZooKeeper.removeMetaData(data);
       getDataSetWatchSuccess(path, data);
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
index 7d0216a..6dd4039 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
                              boolean start) throws IOException {
     String[] dirList = dir.list();
+    if (dirList == null) {
+      return;
+    }
     for (String aDirList : dirList) {
       File f = new File(dir, aDirList);
       if (!f.isHidden()) {
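
[Editor's note] The JarFinder hunk above guards against java.io.File#list() returning null, which the JDK does when the path is not a directory or an I/O error occurs. Below is a minimal standalone sketch of the same guard; the class and method names are hypothetical and not taken from the HBase tree.

import java.io.File;

public class ListDirExample {
  /** Prints the entries of a directory, tolerating File#list() returning null. */
  static void printEntries(File dir) {
    String[] entries = dir.list();
    if (entries == null) {
      // Not a directory, or the listing failed; nothing to iterate over.
      return;
    }
    for (String entry : entries) {
      System.out.println(entry);
    }
  }

  public static void main(String[] args) {
    printEntries(new File(args.length > 0 ? args[0] : "."));
  }
}
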
http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index b4b0f0f..9a4cc32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -1026,12 +1026,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
       final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
       throws IOException {
-    final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
-    for (LoadQueueItem lqi : lqis) {
-      if (!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-        famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-      }
-    }
     try {
       List<LoadQueueItem> toRetry = new ArrayList<>();
       Configuration conf = getConf();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index 34a7633..c394424 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -134,11 +134,6 @@ public class DeadServer {
     assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";

-    if (numProcessing < 0) {
-      LOG.error("Number of dead servers in processing = " + numProcessing
-          + ". Something went wrong, this should always be non-negative.");
-      numProcessing = 0;
-    }
     if (numProcessing == 0) { processing = false; }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b02b042..7327896 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2858,7 +2858,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     checkResources();
     startRegionOperation(Operation.DELETE);
     try {
-      delete.getRow();
       // All edits for the given row (across all column families) must happen atomically.
       doBatchMutate(delete);
     } finally {
@@ -3192,9 +3191,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * In here we also handle replay of edits on region recover.
    * @return Change in size brought about by applying <code>batchOp</code>
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK",
-      justification="Findbugs seems to be confused on this.")
-  @SuppressWarnings("unchecked")
   // TODO: This needs a rewrite. Doesn't have to be this long. St.Ack 20160120
   private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
     boolean replay = batchOp.isInReplay();
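
[Editor's note] The Constraints comparator above and the ReplicationSource comparator below make the same substitution: Long.compare(a, b) in place of Long.valueOf(a).compareTo(b), so two primitive longs are compared without allocating Long wrapper objects. A minimal standalone sketch of that pattern follows; the comparator and data are hypothetical, not HBase code.

import java.util.Arrays;
import java.util.Comparator;

public class LongCompareExample {
  public static void main(String[] args) {
    long[][] rows = { {3L, 10L}, {1L, 20L}, {2L, 5L} };
    // Long.compare(a, b) yields a negative, zero, or positive int, exactly like
    // Long.valueOf(a).compareTo(b), but without boxing either operand.
    Comparator<long[]> byFirstColumn = (r1, r2) -> Long.compare(r1[0], r2[0]);
    Arrays.sort(rows, byFirstColumn);
    for (long[] row : rows) {
      System.out.println(Arrays.toString(row)); // [1, 20], [2, 5], [3, 10]
    }
  }
}
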

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 3d4353f..2911f7a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -505,7 +505,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf

     @Override
     public int compare(Path o1, Path o2) {
-      return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
+      return Long.compare(getTS(o1), getTS(o2));
     }

     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 3316ec5..94fcb9e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -1081,10 +1081,11 @@ public final class Canary implements Tool {
         }
       }
       Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-      for (String tableName : this.configuredReadTableTimeouts.keySet()) {
+      for (Map.Entry<String, Long> entry : configuredReadTableTimeouts.entrySet()) {
+        String tableName = entry.getKey();
         if (actualReadTableLatency.containsKey(tableName)) {
           Long actual = actualReadTableLatency.get(tableName).longValue();
-          Long configured = this.configuredReadTableTimeouts.get(tableName);
+          Long configured = entry.getValue();
           LOG.info("Read operation for " + tableName + " took " + actual +
               " ms. The configured read timeout was " + configured + " ms.");
           if (actual > configured) {
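
[Editor's note] The Canary hunk above replaces a keySet() loop that called get(key) on every iteration with a single pass over entrySet(), so each mapping is looked up only once. A minimal standalone sketch of that pattern; the map contents are hypothetical, not HBase code.

import java.util.HashMap;
import java.util.Map;

public class EntrySetExample {
  public static void main(String[] args) {
    Map<String, Long> configuredTimeouts = new HashMap<>();
    configuredTimeouts.put("tableA", 500L);
    configuredTimeouts.put("tableB", 1000L);

    // Iterating entrySet() hands back the key and value together, avoiding the
    // extra map lookup that keySet() + get(key) performs on every iteration.
    for (Map.Entry<String, Long> entry : configuredTimeouts.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue() + " ms");
    }
  }
}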