http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
index 050d26b..e1c8f87 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
@@ -136,7 +136,6 @@ public class LongMetric implements DatanodeMetric<Long, Long> {
    * @throws ClassCastException   if the specified object's type prevents it
    *                              from being compared to this object.
    */
-  @Override
   public int compareTo(Long o) {
     return Long.compare(this.value, o);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index efd5fd6..a886084 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
 /**
  * SCM Node Metric that is used in the placement classes.
  */
-public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
+public class SCMNodeMetric  implements DatanodeMetric<SCMNodeStat, Long> {
   private SCMNodeStat stat;
 
   /**
@@ -191,7 +191,7 @@ public class SCMNodeMetric implements DatanodeMetric<SCMNodeStat, Long> {
    * @throws ClassCastException   if the specified object's type prevents it
    *                              from being compared to this object.
    */
-  @Override
+  //@Override
   public int compareTo(SCMNodeStat o) {
     if (isEqual(o)) {
       return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 3c871d3..962bbb4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -52,6 +52,7 @@ public class SCMNodeStat implements NodeStat {
   /**
    * @return the total configured capacity of the node.
    */
+  @Override
   public LongMetric getCapacity() {
     return capacity;
   }
@@ -59,6 +60,7 @@ public class SCMNodeStat implements NodeStat {
   /**
    * @return the total SCM used space on the node.
    */
+  @Override
   public LongMetric getScmUsed() {
     return scmUsed;
   }
@@ -66,6 +68,7 @@ public class SCMNodeStat implements NodeStat {
   /**
    * @return the total remaining space available on the node.
    */
+  @Override
   public LongMetric getRemaining() {
     return remaining;
   }
@@ -77,12 +80,9 @@ public class SCMNodeStat implements NodeStat {
    * @param newUsed in bytes
    * @param newRemaining in bytes
    */
+  @Override
   @VisibleForTesting
   public void set(long newCapacity, long newUsed, long newRemaining) {
-    Preconditions.checkNotNull(newCapacity, "Capacity cannot be null");
-    Preconditions.checkNotNull(newUsed, "used cannot be null");
-    Preconditions.checkNotNull(newRemaining, "remaining cannot be null");
-
     Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
         "negative.");
     Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +
@@ -101,6 +101,7 @@ public class SCMNodeStat implements NodeStat {
    * @param stat Nodestat.
    * @return SCMNodeStat
    */
+  @Override
   public SCMNodeStat add(NodeStat stat) {
     this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
     this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
@@ -114,6 +115,7 @@ public class SCMNodeStat implements NodeStat {
    * @param stat SCMNodeStat.
    * @return Modified SCMNodeStat
    */
+  @Override
   public SCMNodeStat subtract(NodeStat stat) {
     this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
     this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());

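A note on the @Override churn above (dropped in LongMetric, commented out in SCMNodeMetric, added on the NodeStat methods here): the annotation asks the compiler to verify that a method really overrides or implements something, so interface drift fails at compile time instead of silently forking behavior. A minimal sketch of that guarantee, using illustrative names rather than the real HDDS types:

    interface Stat {
      long getCapacity();
    }

    class SimpleStat implements Stat {
      private final long capacity;

      SimpleStat(long capacity) {
        this.capacity = capacity;
      }

      // If Stat.getCapacity() were renamed or re-typed, this @Override
      // would turn the mismatch into a compile error right here.
      @Override
      public long getCapacity() {
        return capacity;
      }
    }
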
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
index 993a986..5a2e2b1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
@@ -48,12 +48,13 @@ public class ReplicationActivityStatus implements
     replicationStatusListener = new ReplicationStatusListener();
     chillModeStatusListener = new ChillModeStatusListener();
   }
-
+  @Override
   public boolean isReplicationEnabled() {
     return replicationEnabled.get();
   }
 
   @VisibleForTesting
+  @Override
   public void setReplicationEnabled(boolean enabled) {
     replicationEnabled.set(enabled);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index 8c11e84..e700ecd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -98,6 +98,7 @@ public class ReplicationManager implements Runnable {
     threadFactory.newThread(this).start();
   }
 
+  @Override
   public void run() {
 
     while (running) {
@@ -168,6 +169,7 @@ public class ReplicationManager implements Runnable {
 
         } else if (deficit < 0) {
           //TODO: too many replicas. Not handled yet.
+          LOG.debug("Too many replicas is not handled yet.");
         }
 
       } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index 996478c..eb6dc0d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -22,8 +22,8 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -39,7 +39,7 @@ import java.util.concurrent.locks.ReentrantLock;
  */
 public class CommandQueue {
   // This list is used as default return value.
-  private static final List<SCMCommand> DEFAULT_LIST = new LinkedList<>();
+  private static final List<SCMCommand> DEFAULT_LIST = new ArrayList<>();
   private final Map<UUID, Commands> commandMap;
   private final Lock lock;
   private long commandsInQueue;
@@ -136,7 +136,7 @@ public class CommandQueue {
      * Constructs a Commands class.
      */
     Commands() {
-      commands = new LinkedList<>();
+      commands = new ArrayList<>();
       updateTime = 0;
       readTime = 0;
     }
@@ -182,7 +182,7 @@ public class CommandQueue {
      */
     public List<SCMCommand> getCommands() {
       List<SCMCommand> temp = this.commands;
-      this.commands = new LinkedList<>();
+      this.commands = new ArrayList<>();
       readTime = Time.monotonicNow();
       return temp;
     }

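The LinkedList-to-ArrayList swaps in CommandQueue (and in several files below) follow the usual guidance: these lists are only appended to and iterated, so ArrayList's single backing array beats a node object per element in both memory and cache behavior. A minimal sketch of the access pattern in question, with illustrative values:

    import java.util.ArrayList;
    import java.util.List;

    public final class AppendIterateDemo {
      public static void main(String[] args) {
        // Append-then-iterate: the only operations these call sites use,
        // and the ones ArrayList is fastest at.
        List<String> commands = new ArrayList<>();
        commands.add("closeContainer");
        commands.add("replicateContainer");
        for (String command : commands) {
          System.out.println(command);
        }
      }
    }
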
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index a459519..cddd3ae 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -144,6 +144,8 @@ public class NodeStateManager implements Runnable, Closeable {
     executorService = HadoopExecutors.newScheduledThreadPool(1,
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
+    //BUGBUG TODO: The return value is ignored; if an exception is thrown in
+    // the executing function, it will be ignored.
     executorService.schedule(this, heartbeatCheckerIntervalMs,
         TimeUnit.MILLISECONDS);
   }
@@ -331,7 +333,7 @@ public class NodeStateManager implements Runnable, Closeable {
    * @return list of nodes
    */
   public List<DatanodeDetails> getNodes(NodeState state) {
-    List<DatanodeDetails> nodes = new LinkedList<>();
+    List<DatanodeDetails> nodes = new ArrayList<>();
     nodeStateMap.getNodes(state).forEach(
         uuid -> {
           try {
@@ -352,7 +354,7 @@ public class NodeStateManager implements Runnable, Closeable {
    * @return all the managed nodes
    */
   public List<DatanodeDetails> getAllNodes() {
-    List<DatanodeDetails> nodes = new LinkedList<>();
+    List<DatanodeDetails> nodes = new ArrayList<>();
     nodeStateMap.getAllNodes().forEach(
         uuid -> {
           try {
@@ -613,6 +615,8 @@ public class NodeStateManager implements Runnable, Closeable {
 
     if (!Thread.currentThread().isInterrupted() &&
         !executorService.isShutdown()) {
+      //BUGBUG: The returned future needs to be checked here to make sure the
+      // exceptions are handled correctly.
       executorService.schedule(this, heartbeatCheckerIntervalMs,
           TimeUnit.MILLISECONDS);
     } else {

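The two BUGBUG comments above describe real ScheduledExecutorService behavior: schedule() wraps the task in a future, so an exception thrown inside it is captured there and never reaches an uncaught-exception handler; nothing surfaces unless someone calls get() on the returned future. A minimal, self-contained demonstration:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public final class SwallowedExceptionDemo {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
        Runnable failing = () -> {
          throw new IllegalStateException("invisible unless the future is read");
        };
        ScheduledFuture<?> future =
            executor.schedule(failing, 10, TimeUnit.MILLISECONDS);
        try {
          // Remove this get() and the exception vanishes silently;
          // that is the situation the BUGBUG comments flag.
          future.get();
        } catch (ExecutionException e) {
          System.err.println("task failed: " + e.getCause());
        } finally {
          executor.shutdown();
        }
      }
    }
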
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index 9625f81..c0f46f1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -59,6 +59,7 @@ public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
    * @param datanodeID   -- Datanode UUID
    * @param containerIDs - List of ContainerIDs.
    */
+  @Override
   public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
       throws SCMException {
     super.insertNewDatanode(datanodeID, containerIDs);
@@ -84,6 +85,7 @@ public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
   }
 
   @VisibleForTesting
+  @Override
   public int size() {
     return dn2ObjectMap.size();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index a917e79..a68e2b5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -172,7 +172,7 @@ public class NodeStateMap {
   public List<UUID> getNodes(NodeState state) {
     lock.readLock().lock();
     try {
-      return new LinkedList<>(stateMap.get(state));
+      return new ArrayList<>(stateMap.get(state));
     } finally {
       lock.readLock().unlock();
     }
@@ -186,7 +186,7 @@ public class NodeStateMap {
   public List<UUID> getAllNodes() {
     lock.readLock().lock();
     try {
-      return new LinkedList<>(nodeMap.keySet());
+      return new ArrayList<>(nodeMap.keySet());
     } finally {
       lock.readLock().unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index 5e8d0dc..cf1955d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
 import org.apache.hadoop.utils.MetadataStoreBuilder;
 import org.slf4j.Logger;
@@ -94,7 +95,8 @@ public class SCMPipelineManager implements PipelineManager {
       return;
     }
     List<Map.Entry<byte[], byte[]>> pipelines =
-        pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
+        pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE,
+            (MetadataKeyFilters.MetadataKeyFilter[])null);
 
     for (Map.Entry<byte[], byte[]> entry : pipelines) {
       Pipeline pipeline = Pipeline.getFromProtobuf(

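The cast added to the getSequentialRangeKVs call resolves a varargs ambiguity: passing a bare null to a varargs parameter leaves it unclear, to readers and to static analysis, whether the intent is a null array or an array holding a single null element. The explicit array cast documents the former. A sketch with a stand-in method (the real MetadataStore signature is not shown here):

    public final class VarargsNullDemo {
      // Illustrative stand-in for a varargs filter parameter.
      static int count(String... filters) {
        return filters == null ? -1 : filters.length;
      }

      public static void main(String[] args) {
        System.out.println(count((String[]) null)); // -1: no array at all
        System.out.println(count((String) null));   //  1: array of one null
        System.out.println(count());                //  0: empty array
      }
    }
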
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index f141ae5..b59042e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -58,7 +58,6 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
@@ -354,7 +353,7 @@ public class SCMClientProtocolServer implements
    */
   public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) {
     Preconditions.checkNotNull(state, "Node Query set cannot be null");
-    return new LinkedList<>(queryNodeState(state));
+    return new ArrayList<>(queryNodeState(state));
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 0beceab..77ef9c8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -89,7 +89,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.Collectors;
 
@@ -225,7 +225,7 @@ public class SCMDatanodeProtocolServer implements
   @Override
   public SCMHeartbeatResponseProto sendHeartbeat(
       SCMHeartbeatRequestProto heartbeat) throws IOException {
-    List<SCMCommandProto> cmdResponses = new LinkedList<>();
+    List<SCMCommandProto> cmdResponses = new ArrayList<>();
     for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) {
       cmdResponses.add(getCommandResponse(cmd));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
index 6c4f249..6f894e8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
@@ -42,7 +42,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
- * Unit tests for {@link HddsServerUtil}
+ * Unit tests for {@link HddsServerUtil}.
  */
 public class TestHddsServerUtils {
   public static final Logger LOG = LoggerFactory.getLogger(
@@ -58,6 +58,7 @@ public class TestHddsServerUtils {
    * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
    */
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testGetDatanodeAddressWithPort() {
     final String scmHost = "host123:100";
     final Configuration conf = new OzoneConfiguration();
@@ -78,8 +79,8 @@ public class TestHddsServerUtils {
     conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
   }
 
   /**
@@ -93,8 +94,8 @@ public class TestHddsServerUtils {
     conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
   }
 
   /**
@@ -103,6 +104,7 @@ public class TestHddsServerUtils {
    * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored.
    */
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testDatanodeAddressFallbackToClientWithPort() {
     final String scmHost = "host123:100";
     final Configuration conf = new OzoneConfiguration();
@@ -124,8 +126,8 @@ public class TestHddsServerUtils {
     conf.set(OZONE_SCM_NAMES, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
   }
 
   /**
@@ -134,6 +136,7 @@ public class TestHddsServerUtils {
    * defined by OZONE_SCM_NAMES should be ignored.
    */
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testDatanodeAddressFallbackToScmNamesWithPort() {
     final String scmHost = "host123:100";
     final Configuration conf = new OzoneConfiguration();
@@ -141,7 +144,7 @@ public class TestHddsServerUtils {
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
     assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+    assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort());
   }
 
   /**

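The swapped assertEquals arguments throughout this test are not cosmetic: JUnit's signature is assertEquals(expected, actual), and the failure message is built from that order, so reversed operands report the two values backwards. A minimal illustration (hypothetical test, not part of the patch):

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class ArgumentOrderTest {
      @Test
      public void expectedComesFirst() {
        int actualPort = computePort();
        // On failure JUnit prints "expected:<9861> but was:<...>", which
        // only reads correctly with the expected value in first position.
        assertEquals(9861, actualPort);
      }

      private int computePort() {
        return 9861; // stand-in for the value under test
      }
    }
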
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 08cbdd7..631283c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -48,7 +48,7 @@ import org.mockito.Mockito;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
@@ -378,12 +378,9 @@ public class TestNodeManager {
    * Check for NPE when datanodeDetails is passed null for sendHeartbeat.
    *
    * @throws IOException
-   * @throws InterruptedException
-   * @throws TimeoutException
    */
   @Test
-  public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException,
-      InterruptedException, TimeoutException {
+  public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       nodeManager.processHeartbeat(null);
     } catch (NullPointerException npe) {
@@ -588,7 +585,7 @@ public class TestNodeManager {
    */
   private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int
       count) {
-    List<DatanodeDetails> list = new LinkedList<>();
+    List<DatanodeDetails> list = new ArrayList<>();
     for (int x = 0; x < count; x++) {
       DatanodeDetails datanodeDetails = TestUtils
           .createRandomDatanodeAndRegister(nodeManager);
@@ -943,7 +940,7 @@ public class TestNodeManager {
   }
 
   @Test
-  public void testHandlingSCMCommandEvent() {
+  public void testHandlingSCMCommandEvent() throws IOException {
     OzoneConfiguration conf = getConf();
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
@@ -974,6 +971,7 @@ public class TestNodeManager {
          .assertEquals(command.get(0).getClass(), CloseContainerCommand.class);
     } catch (IOException e) {
       e.printStackTrace();
+      throw e;
     }
   }
 

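The added rethrow in testHandlingSCMCommandEvent fixes a test that could pass while the code under it failed: printStackTrace alone swallows the exception and JUnit sees a green run. Rethrowing (or dropping the try/catch and declaring throws) keeps the failure visible. A minimal sketch of the pattern:

    import java.io.IOException;

    import org.junit.Test;

    public class SurfacingFailuresTest {
      @Test
      public void failuresMustFailTheTest() throws IOException {
        try {
          operationUnderTest();
        } catch (IOException e) {
          e.printStackTrace();
          throw e; // without this line the test would pass regardless
        }
      }

      private void operationUnderTest() throws IOException {
        // stand-in for the real call
      }
    }
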
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 623fc16..e12c643 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -150,22 +150,22 @@ public class TestSCMNodeStorageStatMap {
         path, reportCapacity, reportScmUsed, reportRemaining, null);
     StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
-    Assert.assertEquals(result.getStatus(),
-        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
+        result.getStatus());
     StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
         NodeReportProto.newBuilder();
     StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage();
     reportList.add(srb);
    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-    Assert.assertEquals(result.getStatus(),
-        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL,
+        result.getStatus());
 
     reportList.add(TestUtils
         .createStorageReport(UUID.randomUUID(), path, reportCapacity,
             reportCapacity, 0, null));
    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
-    Assert.assertEquals(result.getStatus(),
-        SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
+    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE,
+        result.getStatus());
     // Mark a disk failed 
     StorageReportProto srb2 = StorageReportProto.newBuilder()
         .setStorageUuid(UUID.randomUUID().toString())
@@ -174,8 +174,8 @@ public class TestSCMNodeStorageStatMap {
     reportList.add(srb2);
     nrb.addAllStorageReport(reportList);
     result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
-    Assert.assertEquals(result.getStatus(),
-        SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE);
+    Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus
+        .FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus());
 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index ec1d527..77ed907 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -51,7 +51,7 @@ public class TestNode2ContainerMap {
     for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
       TreeSet<ContainerID> currentSet = new TreeSet<>();
       for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
-        long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
+        long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
         currentSet.add(new ContainerID(currentCnIndex));
       }
       testData.put(UUID.randomUUID(), currentSet);
@@ -115,8 +115,8 @@ public class TestNode2ContainerMap {
     map.insertNewDatanode(key, values);
     Assert.assertTrue(map.isKnownDatanode(key));
     ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(result.getStatus(),
-        ReportResult.ReportStatus.ALL_IS_WELL);
+    Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL,
+        result.getStatus());
   }
 
   @Test
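One caveat on the overflow fix above: `(long) (dnIndex * CONTAINER_COUNT)` casts the product, so the multiplication itself still runs in 32-bit int arithmetic; only casting an operand, as in `(long) dnIndex * CONTAINER_COUNT`, widens the computation. With this test's small constants the product cannot overflow either way, so the cast mainly satisfies the analyzer, but the distinction matters with larger operands:

    public final class CastPlacementDemo {
      public static void main(String[] args) {
        int a = 300000;
        int b = 100000;
        long late = (long) (a * b); // multiply overflows in int, then widens
        long early = (long) a * b;  // widens first, multiply runs in long
        System.out.println(late);   // -64771072 (wrapped around)
        System.out.println(early);  // 30000000000
      }
    }
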

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
index 7150d1b..328ba30 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
@@ -19,9 +19,8 @@ package org.apache.hadoop.ozone.container.placement;
 
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
+
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -30,20 +29,18 @@ import static org.junit.Assert.assertTrue;
  * Tests that test Metrics that support placement.
  */
 public class TestDatanodeMetrics {
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
   @Test
   public void testSCMNodeMetric() {
     SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
     assertEquals((long) stat.getCapacity().get(), 100L);
-    assertEquals((long) stat.getScmUsed().get(), 10L);
-    assertEquals((long) stat.getRemaining().get(), 90L);
+    assertEquals(10L, (long) stat.getScmUsed().get());
+    assertEquals(90L, (long) stat.getRemaining().get());
     SCMNodeMetric metric = new SCMNodeMetric(stat);
 
     SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
-    assertEquals((long) stat.getCapacity().get(), 100L);
-    assertEquals((long) stat.getScmUsed().get(), 10L);
-    assertEquals((long) stat.getRemaining().get(), 90L);
+    assertEquals(100L, (long) stat.getCapacity().get());
+    assertEquals(10L, (long) stat.getScmUsed().get());
+    assertEquals(90L, (long) stat.getRemaining().get());
 
     SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
     assertTrue(metric.isEqual(newMetric.get()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 136a684..353996b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -124,6 +124,7 @@ public class ObjectStore {
    * @return String - Ozone Volume name.
    * @throws IOException - Throws if the s3Bucket does not exist.
    */
+  @SuppressWarnings("StringSplitter")
   public String getOzoneVolumeName(String s3BucketName) throws IOException {
     String mapping = getOzoneBucketMapping(s3BucketName);
     return mapping.split("/")[0];
@@ -136,6 +137,7 @@ public class ObjectStore {
    * @return String - Ozone bucket Name.
    * @throws IOException - Throws if the s3bucket does not exist.
    */
+  @SuppressWarnings("StringSplitter")
   public String getOzoneBucketName(String s3BucketName) throws IOException {
     String mapping = getOzoneBucketMapping(s3BucketName);
     return mapping.split("/")[1];

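The StringSplitter suppressions here (and in RpcClient below) refer to a static-analysis check, presumably error-prone's, that flags String.split because its behavior surprises people: the argument is a regex, and trailing empty strings are silently removed while leading ones are kept. For these fixed two-segment mappings the simple split is fine, hence the suppression rather than a rewrite. The surprises, concretely:

    public final class SplitSurpriseDemo {
      public static void main(String[] args) {
        // Trailing empty strings are dropped...
        System.out.println("volume/".split("/").length); // 1, not 2
        // ...but leading empty strings are kept.
        System.out.println("/bucket".split("/").length); // 2
        // And the separator is a regex, so "." matches everything.
        System.out.println("a.b".split(".").length);     // 0
      }
    }
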
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index ea002ec..98481e1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -610,6 +610,7 @@ public class RpcClient implements ClientProtocol {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneVolumeName(String s3BucketName) throws IOException {
     String mapping = getOzoneBucketMapping(s3BucketName);
     return mapping.split("/")[0];
@@ -617,6 +618,7 @@ public class RpcClient implements ClientProtocol {
   }
 
   @Override
+  @SuppressWarnings("StringSplitter")
   public String getOzoneBucketName(String s3BucketName) throws IOException {
     String mapping = getOzoneBucketMapping(s3BucketName);
     return mapping.split("/")[1];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index 9850778..9e60e4e 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -40,11 +40,12 @@ import static org.junit.Assert.assertThat;
 
 /**
  * This test class verifies the parsing of SCM endpoint config settings. The
- * parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
+ * parsing logic is in
+ * {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}.
  */
 public class TestHddsClientUtils {
   @Rule
-  public Timeout timeout = new Timeout(300_000);
+  public Timeout timeout = new Timeout(300000);
 
   @Rule
   public ExpectedException thrown= ExpectedException.none();
@@ -114,13 +115,14 @@ public class TestHddsClientUtils {
     final String scmHost = "host123";
     final Configuration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
-    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
         conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testBlockClientFallbackToClientWithPort() {
     // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
     // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
@@ -132,8 +134,8 @@ public class TestHddsClientUtils {
     conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
         conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+    assertEquals(scmHost.split(":")[0], address.getHostName());
+    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test
@@ -143,13 +145,14 @@ public class TestHddsClientUtils {
     final String scmHost = "host456";
     final Configuration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
         conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testBlockClientFallbackToScmNamesWithPort() {
     // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
     // are undefined it should fallback to OZONE_SCM_NAMES.
@@ -159,10 +162,10 @@ public class TestHddsClientUtils {
     final String scmHost = "host456:200";
     final Configuration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+    final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients(
         conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+    assertEquals(scmHost.split(":")[0], address.getHostName());
+    assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test
@@ -172,12 +175,13 @@ public class TestHddsClientUtils {
     final String scmHost = "host456";
     final Configuration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
-    assertEquals(address.getHostName(), scmHost);
-    assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
+    final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
+    assertEquals(scmHost, address.getHostName());
+    assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test
+  @SuppressWarnings("StringSplitter")
   public void testClientFallbackToScmNamesWithPort() {
     // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
     // to OZONE_SCM_NAMES.
@@ -187,9 +191,9 @@ public class TestHddsClientUtils {
     final String scmHost = "host456:300";
     final Configuration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
-    final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
-    assertEquals(address.getHostName(), scmHost.split(":")[0]);
-    assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
+    final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf);
+    assertEquals(scmHost.split(":")[0], address.getHostName());
+    assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
index de75a05..2584eb5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
@@ -34,6 +34,7 @@ import java.util.HashMap;
 /**
  * This helper class keeps a map of all user and their permissions.
  */
+@SuppressWarnings("ProtocolBufferOrdinal")
 public class OmOzoneAclMap {
   // per Acl Type user:rights map
   private ArrayList<Map<String, OzoneAclRights>> aclMaps;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
index cf2810f..26798e9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.web.response;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hdds.protocol.StorageType;
@@ -85,7 +85,7 @@ public class BucketInfo implements Comparable<BucketInfo> {
    * Default constructor for BucketInfo.
    */
   public BucketInfo() {
-    acls = new LinkedList<OzoneAcl>();
+    acls = new ArrayList<>();
   }
 
   /**
@@ -318,7 +318,7 @@ public class BucketInfo implements Comparable<BucketInfo> {
    * for the Json serialization.
    */
   @JsonFilter(BUCKET_INFO)
-  class MixIn {
+  static class MixIn {
 
   }
 
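Making MixIn static is the standard fix for a nested class that never uses the enclosing instance: a non-static inner class carries a hidden reference to its outer object, which costs a pointer per instance and can keep the outer object from being garbage collected. The difference in one file:

    public class Outer {
      class Inner { }          // holds a hidden Outer.this reference
      static class Nested { }  // no implicit reference to Outer

      public static void main(String[] args) {
        Nested nested = new Nested();           // no Outer needed
        Inner inner = new Outer().new Inner();  // requires an instance
        System.out.println(nested + " " + inner);
      }
    }
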

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
index f45883b..e364e7d 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
@@ -18,17 +18,19 @@
 
 package org.apache.hadoop.ozone.web;
 
-
-import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 /**
  * Test Ozone Bucket Info operation.
  */
@@ -38,7 +40,7 @@ public class TestBucketInfo {
     BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
     String bucketInfoString = bucketInfo.toJsonString();
     BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assert(bucketInfo.equals(newBucketInfo));
+    assertEquals(bucketInfo, newBucketInfo);
   }
 
   @Test
@@ -46,7 +48,7 @@ public class TestBucketInfo {
     BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
     String bucketInfoString = bucketInfo.toDBString();
     BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assert(bucketInfo.equals(newBucketInfo));
+    assertEquals(bucketInfo, newBucketInfo);
   }
 
   @Test
@@ -54,18 +56,17 @@ public class TestBucketInfo {
     BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
     String bucketInfoString = bucketInfo.toDBString();
     BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assert(bucketInfo.equals(newBucketInfo));
-    List<OzoneAcl> aclList = new LinkedList<>();
+    assertEquals(bucketInfo, newBucketInfo);
+    List<OzoneAcl> aclList = new ArrayList<>();
 
     aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
     aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
     newBucketInfo.setAcls(aclList);
 
-    assert(newBucketInfo.getAcls() != null);
-    assert(newBucketInfo.getAcls().size() == 2);
+    assertNotNull(newBucketInfo.getAcls());
+    assertEquals(2, newBucketInfo.getAcls().size());
   }
 
-
   @Test
   public void testBucketInfoVersionAndType() throws IOException {
     BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
@@ -75,7 +76,7 @@ public class TestBucketInfo {
     String bucketInfoString = bucketInfo.toDBString();
 
     BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assert(bucketInfo.equals(newBucketInfo));
+    assertEquals(bucketInfo, newBucketInfo);
   }
 
 }

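Replacing the bare assert(...) calls with JUnit assertions is more than style: the Java assert statement is a no-op unless the JVM runs with -ea, so under a default configuration those checks verified nothing. JUnit's asserts always run and report both values on failure. A quick demonstration of the trap:

    public final class AssertKeywordDemo {
      public static void main(String[] args) {
        // Run with plain "java AssertKeywordDemo" and this prints "reached":
        // assert statements are skipped unless -ea is passed to the JVM.
        assert 1 == 2 : "never evaluated without -ea";
        System.out.println("reached");
      }
    }
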
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
index d777d0c..ba4a5ac 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
@@ -1,19 +1,18 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
 package org.apache.hadoop.ozone.web;
@@ -82,29 +81,29 @@ public class TestQuota {
   @Test
   public void testVerifyQuota() {
     OzoneQuota qt = OzoneQuota.parseQuota("10TB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.TB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L * 1024L));
+    assertEquals(10, qt.getSize());
+    assertEquals(OzoneQuota.Units.TB, qt.getUnit());
+    assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes());
 
     qt = OzoneQuota.parseQuota("10MB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.MB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L));
+    assertEquals(10, qt.getSize());
+    assertEquals(OzoneQuota.Units.MB, qt.getUnit());
+    assertEquals(10L * (1024L * 1024L), qt.sizeInBytes());
 
     qt = OzoneQuota.parseQuota("10GB");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.GB);
-    assertEquals(qt.sizeInBytes(), 10L * (1024L * 1024L * 1024L));
+    assertEquals(10, qt.getSize());
+    assertEquals(OzoneQuota.Units.GB, qt.getUnit());
+    assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes());
 
     qt = OzoneQuota.parseQuota("10BYTES");
-    assertEquals(qt.getSize(), 10);
-    assertEquals(qt.getUnit(), OzoneQuota.Units.BYTES);
-    assertEquals(qt.sizeInBytes(), 10L);
+    assertEquals(10, qt.getSize());
+    assertEquals(OzoneQuota.Units.BYTES, qt.getUnit());
+    assertEquals(10L, qt.sizeInBytes());
 
     OzoneQuota emptyQuota = new OzoneQuota();
-    assertEquals(emptyQuota.sizeInBytes(), -1L);
-    assertEquals(emptyQuota.getSize(), 0);
-    assertEquals(emptyQuota.getUnit(), OzoneQuota.Units.UNDEFINED);
+    assertEquals(-1L, emptyQuota.sizeInBytes());
+    assertEquals(0, emptyQuota.getSize());
+    assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 66bdb5b..4f8943e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -23,8 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -36,11 +35,12 @@ import java.io.IOException;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
 
+/**
+ * Tests for Pipeline Closing.
+ */
 public class TestPipelineClose {
 
   private static MiniOzoneCluster cluster;
@@ -88,7 +88,6 @@ public class TestPipelineClose {
     }
   }
 
-
   @Test
   public void testPipelineCloseWithClosedContainer() throws IOException {
     Set<ContainerID> set = pipelineManager
@@ -112,8 +111,8 @@ public class TestPipelineClose {
     pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
     Pipeline pipeline1 = pipelineManager
         .getPipeline(ratisContainer1.getPipeline().getId());
-    Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipeline1.getPipelineState());
     pipelineManager.removePipeline(pipeline1.getId());
     for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
      // Assert that the pipeline has been removed from Node2PipelineMap as well
@@ -131,12 +130,12 @@ public class TestPipelineClose {
 
     ContainerID cId2 = ratisContainer2.getContainerInfo().containerID();
     pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
-    Assert.assertEquals(
-        pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
-            .getPipelineState(), Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipelineManager.getPipeline(
+            ratisContainer2.getPipeline().getId()).getPipelineState());
     Pipeline pipeline2 = pipelineManager
         .getPipeline(ratisContainer2.getPipeline().getId());
-    Assert.assertEquals(pipeline2.getPipelineState(),
-        Pipeline.PipelineState.CLOSED);
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipeline2.getPipelineState());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
new file mode 100644
index 0000000..f685b17
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Package info tests.
+ */
+package org.apache.hadoop.hdds.scm.pipeline;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index 9982da4..ae55746 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -29,7 +29,15 @@ import java.io.IOException;
 import java.util.List;
 import java.util.function.Consumer;
 
-public class OzoneTestUtils {
+/**
+ * Helper class for Tests.
+ */
+public final class OzoneTestUtils {
+  /**
+   * Never Constructed.
+   */
+  private OzoneTestUtils() {
+  }
 
   /**
    * Close containers which contain the blocks listed in
@@ -55,7 +63,7 @@ public class OzoneTestUtils {
             .getContainer(ContainerID.valueof(
                 blockID.getContainerID())).isOpen());
       } catch (IOException e) {
-        e.printStackTrace();
+        throw new AssertionError("Failed to close the container", e);
       }
     }, omKeyLocationInfoGroups);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 0051ecb..c52490f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -40,12 +40,11 @@ import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
 import org.apache.hadoop.utils.MetadataStore;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -83,8 +82,8 @@ public class TestStorageContainerManagerHelper {
     storageHandler.createVolume(createVolumeArgs);
 
     BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setAddAcls(new ArrayList<>());
+    bucketArgs.setRemoveAcls(new ArrayList<>());
     bucketArgs.setStorageType(StorageType.DISK);
     storageHandler.createBucket(bucketArgs);
 
@@ -144,9 +143,6 @@ public class TestStorageContainerManagerHelper {
   public List<Long> getAllBlocks(Long containeID) throws IOException {
     List<Long> allBlocks = Lists.newArrayList();
     MetadataStore meta = getContainerMetadata(containeID);
-    MetadataKeyFilter filter =
-        (preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
-            .startsWith(OzoneConsts.DELETING_KEY_PREFIX);
     List<Map.Entry<byte[], byte[]>> kvs =
         meta.getRangeKVs(null, Integer.MAX_VALUE,
             MetadataKeyFilters.getNormalKeyFilter());
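
The deleted lambda was dead code: the getRangeKVs call above is an unchanged context line, so even before this patch it passed MetadataKeyFilters.getNormalKeyFilter() while the local `filter` variable went unused. For reference, the hand-rolled form the patch removes (a fragment quoting the deleted lines; it assumes the MetadataKeyFilter import that is dropped above) was:

    // Unused local the patch deletes; it was never passed to getRangeKVs,
    // so removing it changes no behavior.
    MetadataKeyFilters.MetadataKeyFilter unusedFilter =
        (preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
            .startsWith(OzoneConsts.DELETING_KEY_PREFIX);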

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 935423d..405ce8e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -61,6 +61,8 @@ import java.util.concurrent.TimeUnit;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Tests Close Container Exception handling by Ozone Client.
  */
@@ -121,7 +123,8 @@ public class TestCloseContainerHandlingByClient {
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     // write data more than 1 chunk
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
@@ -141,9 +144,9 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
 
     // Written the same data twice
-    String dataString = new String(data);
-    dataString.concat(dataString);
-    validateData(keyName, dataString.getBytes());
+    String dataString = new String(data, UTF_8);
+    dataString = dataString.concat(dataString);
+    validateData(keyName, dataString.getBytes(UTF_8));
   }
 
   @Test
@@ -152,7 +155,8 @@ public class TestCloseContainerHandlingByClient {
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     // write data more than 1 chunk
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     Assert.assertTrue(key.getOutputStream() instanceof ChunkGroupOutputStream);
@@ -184,7 +188,7 @@ public class TestCloseContainerHandlingByClient {
     // write data more than 1 chunk
     byte[] data =
         ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
-            .getBytes();
+            .getBytes(UTF_8);
     Assert.assertEquals(data.length, 3 * blockSize);
     key.write(data);
 
@@ -199,7 +203,7 @@ public class TestCloseContainerHandlingByClient {
     // write 1 more block worth of data. It will fail and new block will be
     // allocated
     key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize)
-        .getBytes());
+        .getBytes(UTF_8));
 
     key.close();
     // read the key from OM again and match the length.The length will still
@@ -232,13 +236,13 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
-    byte[] data = dataString.getBytes();
+    byte[] data = dataString.getBytes(UTF_8);
     key.write(data);
     // 3 block are completely written to the DataNode in 3 blocks.
     // Data of length half of chunkSize resides in the chunkOutput stream buffer
     String dataString2 =
         ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2);
-    key.write(dataString2.getBytes());
+    key.write(dataString2.getBytes(UTF_8));
     //get the name of a valid container
     OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
@@ -257,9 +261,9 @@ public class TestCloseContainerHandlingByClient {
     // closeContainerException and remaining data in the chunkOutputStream
     // buffer will be copied into a different allocated block and will be
     // committed.
-    Assert.assertEquals(dataString.concat(dataString2).getBytes().length,
+    Assert.assertEquals(dataString.concat(dataString2).getBytes(UTF_8).length,
         keyInfo.getDataSize());
-    validateData(keyName, dataString.concat(dataString2).getBytes());
+    validateData(keyName, dataString.concat(dataString2).getBytes(UTF_8));
   }
 
   @Test
@@ -274,7 +278,8 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(4, groupOutputStream.getStreamEntries().size());
     // write data 3 blocks and one more chunk
     byte[] writtenData =
-        ContainerTestHelper.getFixedLengthString(keyString, keyLen).getBytes();
+        ContainerTestHelper.getFixedLengthString(keyString, keyLen)
+            .getBytes(UTF_8);
     byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
     Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
     key.write(data);
@@ -367,8 +372,8 @@ public class TestCloseContainerHandlingByClient {
           .isContainerPresent(cluster, containerID, dn))) {
         for (DatanodeDetails datanodeDetails : datanodes) {
           GenericTestUtils.waitFor(() -> ContainerTestHelper
-                  .isContainerClosed(cluster, containerID, datanodeDetails), 500,
-              15 * 1000);
+                  .isContainerClosed(cluster, containerID, datanodeDetails),
+              500, 15 * 1000);
           //double check if it's really closed
           // (waitFor also throws an exception)
           Assert.assertTrue(ContainerTestHelper
@@ -395,7 +400,7 @@ public class TestCloseContainerHandlingByClient {
     Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
     String dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    byte[] data = dataString.getBytes();
+    byte[] data = dataString.getBytes(UTF_8);
     key.write(data);
     List<OmKeyLocationInfo> locationInfos =
         new ArrayList<>(groupOutputStream.getLocationInfoList());
@@ -411,7 +416,7 @@ public class TestCloseContainerHandlingByClient {
     waitForContainerClose(keyName, key, HddsProtos.ReplicationType.RATIS);
     dataString =
         ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
-    data = dataString.getBytes();
+    data = dataString.getBytes(UTF_8);
     key.write(data);
     Assert.assertEquals(2, groupOutputStream.getStreamEntries().size());
 
@@ -443,7 +448,8 @@ public class TestCloseContainerHandlingByClient {
     String keyName = "ratis";
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
     byte[] data = ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes();
+        .getFixedLengthString(keyString, chunkSize + chunkSize / 2)
+        .getBytes(UTF_8);
     key.write(data);
 
     //get the name of a valid container
@@ -462,9 +468,9 @@ public class TestCloseContainerHandlingByClient {
     // updated correctly in OzoneManager once the steam is closed
     key.close();
     OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
-    String dataString = new String(data);
-    dataString.concat(dataString);
+    String dataString = new String(data, UTF_8);
+    dataString = dataString.concat(dataString);
     Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
-    validateData(keyName, dataString.getBytes());
+    validateData(keyName, dataString.getBytes(UTF_8));
   }
 }
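
Two distinct fixes run through this file. First, every String#getBytes() and new String(byte[]) call now names UTF_8 explicitly, so the tests no longer depend on the JVM's platform-default charset. Second, dataString.concat(dataString) previously discarded its result: Strings are immutable and concat returns a new String rather than modifying the receiver, so the old code validated against half the intended data. A self-contained sketch of both points; the demo class name is hypothetical:

    import static java.nio.charset.StandardCharsets.UTF_8;

    public class CharsetAndConcatDemo {          // hypothetical name
      public static void main(String[] args) {
        String s = "abc";
        s.concat(s);          // bug: return value ignored, s is still "abc"
        s = s.concat(s);      // fix: reassign, s is now "abcabc"

        byte[] platform = s.getBytes();       // varies with file.encoding
        byte[] portable = s.getBytes(UTF_8);  // identical on every JVM
        System.out.println(platform.length + " / " + portable.length);
      }
    }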

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
new file mode 100644
index 0000000..84eb8dd
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Test utils for Ozone.
+ */
+package org.apache.hadoop.ozone;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
index 290e834..baf93a2 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.BeforeClass;
 import org.junit.AfterClass;
@@ -158,6 +159,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesByUser() throws IOException {
     testGetVolumesByUser(port);
   }
@@ -167,6 +169,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesOfAnotherUser() throws IOException {
     super.testGetVolumesOfAnotherUser(port);
   }
@@ -177,6 +180,7 @@ public class TestOzoneVolumes extends TestOzoneHelper {
    *
    * @throws IOException
    */
+  @Ignore("Test is ignored for time being, to be enabled after security.")
   public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
     super.testGetVolumesOfAnotherUserShouldFail(port);
   }
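
JUnit 4's @Ignore removes a test from the run while keeping it visible: the runner reports it as skipped and surfaces the reason string, which is friendlier than commenting the method out. A minimal sketch; the test class and method are illustrative only:

    import org.junit.Ignore;
    import org.junit.Test;

    public class ExampleVolumeTest {            // hypothetical
      @Ignore("Re-enable once security lands.")
      @Test
      public void skippedForNow() {
        // Reported as skipped, with the reason, in test output.
      }
    }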

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
new file mode 100644
index 0000000..91a013c
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * REST client tests.
+ */
+package org.apache.hadoop.ozone.web.client;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
new file mode 100644
index 0000000..fba9a39
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Rest Client Tests.
+ */
+package org.apache.hadoop.ozone.web;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
index c6625fd..8f68cc4 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
@@ -26,7 +26,7 @@ import java.io.IOException;
 import java.nio.file.DirectoryNotEmptyException;
 import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hdds.protocol.StorageType;
@@ -211,7 +211,7 @@ public abstract class BucketProcessTemplate {
         args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
     List<String> filteredSet = null;
     if (aclStrings != null) {
-      filteredSet = new LinkedList<>();
+      filteredSet = new ArrayList<>();
       for (String s : aclStrings) {
         if (s.startsWith(tag)) {
           filteredSet.add(s.replaceFirst(tag, ""));
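
As elsewhere in this patch, LinkedList gives way to ArrayList. Both satisfy the List operations used here (append and iterate), but ArrayList backs its elements with one contiguous array, while LinkedList allocates a node per element and traverses by pointer-chasing, so ArrayList is the usual default. A tiny sketch with an illustrative value:

    import java.util.ArrayList;
    import java.util.List;

    public class ListChoiceDemo {                  // hypothetical
      public static void main(String[] args) {
        List<String> filtered = new ArrayList<>(); // contiguous storage
        filtered.add("example-acl");               // amortized O(1) append
        // Cheap iteration and less per-element overhead than LinkedList's
        // node objects; same List contract for the caller.
        System.out.println(filtered);
      }
    }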

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
index 9c115a8..836c03a 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/interfaces/StorageHandler.java
@@ -312,5 +312,6 @@ public interface StorageHandler extends Closeable{
   /**
    * Closes all the opened resources.
    */
+  @Override
   void close();
 }
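
StorageHandler extends Closeable, and close() here narrows the inherited contract: an override may throw fewer checked exceptions than the method it overrides, so redeclaring close() without "throws IOException" lets callers holding a StorageHandler close it with no try/catch. The added @Override makes the compiler verify that relationship. A reduced sketch; the interface name is illustrative:

    import java.io.Closeable;

    interface ExampleHandler extends Closeable {
      @Override
      void close();   // drops Closeable's "throws IOException": legal,
                      // and callers need no exception handling
    }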

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
 
b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
index abb61bb..291ffd3 100644
--- 
a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
+++ 
b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
@@ -37,9 +37,9 @@ public class TestErrorCode {
     OzoneException e = ErrorTable
         .newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
                   "localhost");
-    assertEquals(e.getHostID(), "localhost");
-    assertEquals(e.getShortMessage(),
-                 ErrorTable.ACCESS_DENIED.getShortMessage());
+    assertEquals("localhost", e.getHostID());
+    assertEquals(ErrorTable.ACCESS_DENIED.getShortMessage(),
+        e.getShortMessage());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
 
b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
new file mode 100644
index 0000000..4ebe859
--- /dev/null
+++ 
b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Tests the REST error codes.
+ */
+package org.apache.hadoop.ozone.web;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index be44bce..b99b98e 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -257,6 +257,7 @@ public class BucketManagerImpl implements BucketManager {
    * @param bucketName - Name of the bucket.
    * @throws IOException - on Failure.
    */
+  @Override
   public void deleteBucket(String volumeName, String bucketName)
       throws IOException {
     Preconditions.checkNotNull(volumeName);
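
The @Override annotations added throughout this patch are cheap insurance: if the interface method is later renamed or its signature drifts, an annotated implementation stops compiling instead of silently becoming an unrelated method. A minimal sketch of the failure mode it guards against; the names are illustrative:

    import java.io.IOException;

    interface ExampleBucketManager {
      void deleteBucket(String volume, String bucket) throws IOException;
    }

    class ExampleBucketManagerImpl implements ExampleBucketManager {
      @Override  // a typo such as deleteBuckett() would now fail to compile
      public void deleteBucket(String volume, String bucket)
          throws IOException {
        // ...
      }
    }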

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index cb7ca1f..0bfbc1f 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -22,7 +22,6 @@ import com.google.common.collect.Lists;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -117,7 +116,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
     File metaDir = OmUtils.getOmDbDir(conf);
     this.lock = new OzoneManagerLock(conf);
-    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
+    this.openKeyExpireThresholdMS = 1000L * conf.getInt(
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
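
The 1000 -> 1000L change fixes a classic overflow: 1000 * conf.getInt(...) is evaluated in 32-bit int arithmetic and only widened to long afterwards, so a threshold above roughly 2.1 million seconds (2^31 / 1000, about 24.8 days) would wrap negative before the assignment. Writing 1000L forces the multiplication itself into long. A small demonstration; the class and value are hypothetical:

    public class OverflowDemo {                 // hypothetical
      public static void main(String[] args) {
        int seconds = 3_000_000;                // ~34 days, a large config value
        long wrong = 1000 * seconds;            // int multiply overflows first,
                                                // then the wrong result widens
        long right = 1000L * seconds;           // long multiply throughout
        System.out.println(wrong + " vs " + right);
        // prints -1294967296 vs 3000000000
      }
    }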
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index da56850..bc24a50 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -989,6 +989,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param bucket - Name of the bucket.
    * @throws IOException
    */
+  @Override
   public void deleteBucket(String volume, String bucket) throws IOException {
     Map<String, String> auditMap = buildAuditMap(volume);
     auditMap.put(OzoneConsts.BUCKET, bucket);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
index acc1634..9aab823 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
@@ -66,6 +66,7 @@ public class ServiceListJSONServlet  extends HttpServlet  {
 
   private transient OzoneManager om;
 
+  @Override
   public void init() throws ServletException {
     this.om = (OzoneManager) getServletContext()
         .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index c232bf1..b948acc 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -35,7 +35,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -73,7 +73,7 @@ public class VolumeManagerImpl implements VolumeManager {
     // Get the volume list
     byte[] dbUserKey = metadataManager.getUserKey(owner);
     byte[] volumeList  = metadataManager.getUserTable().get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
+    List<String> prevVolList = new ArrayList<>();
     if (volumeList != null) {
       VolumeList vlist = VolumeList.parseFrom(volumeList);
       prevVolList.addAll(vlist.getVolumeNamesList());
@@ -98,7 +98,7 @@ public class VolumeManagerImpl implements VolumeManager {
     // Get the volume list
     byte[] dbUserKey = metadataManager.getUserKey(owner);
     byte[] volumeList  = metadataManager.getUserTable().get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
+    List<String> prevVolList = new ArrayList<>();
     if (volumeList != null) {
       VolumeList vlist = VolumeList.parseFrom(volumeList);
       prevVolList.addAll(vlist.getVolumeNamesList());
@@ -140,7 +140,7 @@ public class VolumeManagerImpl implements VolumeManager {
 
       try(WriteBatch batch = new WriteBatch()) {
         // Write the vol info
-        List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
+        List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
         for (Map.Entry<String, String> entry :
             args.getKeyValueMap().entrySet()) {
           metadataList.add(HddsProtos.KeyValue.newBuilder()
@@ -250,6 +250,7 @@ public class VolumeManagerImpl implements VolumeManager {
    * @param quota - Quota in bytes.
    * @throws IOException
    */
+  @Override
   public void setQuota(String volume, long quota) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -293,6 +294,7 @@ public class VolumeManagerImpl implements VolumeManager {
    * @return VolumeArgs or exception is thrown.
    * @throws IOException
    */
+  @Override
   public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
     Preconditions.checkNotNull(volume);
     metadataManager.getLock().acquireVolumeLock(volume);
@@ -384,6 +386,7 @@ public class VolumeManagerImpl implements VolumeManager {
    * @return true if the user has access for the volume, false otherwise
    * @throws IOException
    */
+  @Override
   public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
       throws IOException {
     Preconditions.checkNotNull(volume);

