HDDS-825. Code cleanup based on messages from ErrorProne.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a16aa2f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a16aa2f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a16aa2f6

Branch: refs/heads/trunk
Commit: a16aa2f60b27628da2538d4590292cd72eb5b6a9
Parents: fcd94ee
Author: Anu Engineer <aengin...@apache.org>
Authored: Thu Nov 15 17:36:09 2018 -0800
Committer: Anu Engineer <aengin...@apache.org>
Committed: Thu Nov 15 17:36:09 2018 -0800

----------------------------------------------------------------------
 .../hadoop/hdds/scm/XceiverClientGrpc.java      |   1 +
 .../hadoop/hdds/scm/XceiverClientManager.java   |   1 +
 .../hadoop/hdds/scm/XceiverClientRatis.java     |   2 +
 .../hdds/scm/storage/ChunkInputStream.java      |   2 +-
 .../hadoop/hdds/scm/pipeline/Pipeline.java      |  11 ++
 .../apache/hadoop/ozone/common/StorageInfo.java |   1 -
 .../org/apache/hadoop/utils/db/RDBStore.java    |   7 +-
 .../apache/hadoop/ozone/audit/DummyAction.java  |   2 +-
 .../hadoop/ozone/lease/TestLeaseManager.java    |  17 ++-
 .../apache/hadoop/utils/TestHddsIdFactory.java  |   2 +-
 .../apache/hadoop/utils/TestMetadataStore.java  |  96 ++++++-------
 .../hadoop/utils/TestRocksDBStoreMBean.java     |   6 +-
 .../hadoop/utils/db/TestDBStoreBuilder.java     |   4 +-
 .../hadoop/utils/db/TestRDBTableStore.java      |   6 +-
 .../org/apache/hadoop/utils/package-info.java   |  22 +++
 .../keyvalue/impl/BlockManagerImpl.java         |   4 +-
 .../replication/GrpcReplicationClient.java      |   2 +-
 .../ozone/container/common/ScmTestMock.java     |   4 +-
 .../common/TestDatanodeStateMachine.java        |  12 +-
 .../common/impl/TestHddsDispatcher.java         |   3 +-
 .../common/report/TestReportPublisher.java      |   2 +-
 .../container/common/volume/TestHddsVolume.java |  54 +++----
 .../container/common/volume/TestVolumeSet.java  |   6 +-
 .../container/common/volume/package-info.java   |  22 +++
 .../keyvalue/TestBlockManagerImpl.java          | 139 +++++++++----------
 .../keyvalue/TestChunkManagerImpl.java          |   3 +-
 .../keyvalue/TestKeyValueBlockIterator.java     |   4 +-
 .../keyvalue/TestKeyValueContainer.java         |  11 +-
 .../container/keyvalue/TestKeyValueHandler.java |   7 +-
 .../keyvalue/TestTarContainerPacker.java        |  11 +-
 .../ozone/container/keyvalue/package-info.java  |  22 +++
 .../hadoop/hdds/server/events/EventQueue.java   |   7 +-
 .../hdds/server/events/TestEventWatcher.java    |  49 ++++---
 .../hadoop/hdds/server/events/package-info.java |  22 +++
 .../hdds/scm/block/PendingDeleteStatusList.java |  10 +-
 .../hdds/scm/chillmode/ChillModePrecheck.java   |   1 +
 .../hdds/scm/container/ContainerReplica.java    |  16 ++-
 .../placement/algorithms/SCMCommonPolicy.java   |   6 +-
 .../algorithms/SCMContainerPlacementRandom.java |   1 +
 .../placement/metrics/DatanodeMetric.java       |   3 +-
 .../container/placement/metrics/LongMetric.java |   1 -
 .../placement/metrics/SCMNodeMetric.java        |   4 +-
 .../placement/metrics/SCMNodeStat.java          |  10 +-
 .../replication/ReplicationActivityStatus.java  |   3 +-
 .../replication/ReplicationManager.java         |   2 +
 .../hadoop/hdds/scm/node/CommandQueue.java      |   8 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |   8 +-
 .../hdds/scm/node/states/Node2ContainerMap.java |   2 +
 .../hdds/scm/node/states/NodeStateMap.java      |   4 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |   4 +-
 .../scm/server/SCMClientProtocolServer.java     |   3 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   4 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java    |  19 +--
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  12 +-
 .../scm/node/TestSCMNodeStorageStatMap.java     |  16 +--
 .../scm/node/states/TestNode2ContainerMap.java  |   6 +-
 .../placement/TestDatanodeMetrics.java          |  15 +-
 .../apache/hadoop/ozone/client/ObjectStore.java |   2 +
 .../hadoop/ozone/client/rpc/RpcClient.java      |   2 +
 .../ozone/client/TestHddsClientUtils.java       |  42 +++---
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java  |   1 +
 .../hadoop/ozone/web/response/BucketInfo.java   |   6 +-
 .../apache/hadoop/ozone/web/TestBucketInfo.java |  25 ++--
 .../org/apache/hadoop/ozone/web/TestQuota.java  |  55 ++++----
 .../hdds/scm/pipeline/TestPipelineClose.java    |  27 ++--
 .../hadoop/hdds/scm/pipeline/package-info.java  |  22 +++
 .../org/apache/hadoop/ozone/OzoneTestUtils.java |  12 +-
 .../TestStorageContainerManagerHelper.java      |  10 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  46 +++---
 .../org/apache/hadoop/ozone/package-info.java   |  22 +++
 .../hadoop/ozone/web/TestOzoneVolumes.java      |   4 +
 .../hadoop/ozone/web/client/package-info.java   |  22 +++
 .../apache/hadoop/ozone/web/package-info.java   |  22 +++
 .../web/handlers/BucketProcessTemplate.java     |   4 +-
 .../ozone/web/interfaces/StorageHandler.java    |   1 +
 .../apache/hadoop/ozone/web/TestErrorCode.java  |   6 +-
 .../apache/hadoop/ozone/web/package-info.java   |  22 +++
 .../hadoop/ozone/om/BucketManagerImpl.java      |   1 +
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |   3 +-
 .../apache/hadoop/ozone/om/OzoneManager.java    |   1 +
 .../hadoop/ozone/om/ServiceListJSONServlet.java |   1 +
 .../hadoop/ozone/om/VolumeManagerImpl.java      |  11 +-
 .../hadoop/ozone/om/TestChunkStreams.java       |  39 +++---
 .../hadoop/ozone/om/TestKeyManagerImpl.java     |  19 +--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java |   3 +
 .../fs/ozone/TestOzoneFileInterfaces.java       |   3 +-
 .../apache/hadoop/fs/ozone/package-info.java    |  22 +++
 .../ozone/s3/header/AuthorizationHeaderV2.java  |   1 +
 .../ozone/s3/header/AuthorizationHeaderV4.java  |   1 +
 .../hadoop/ozone/s3/header/Credential.java      |   1 +
 .../hadoop/ozone/client/ObjectStoreStub.java    |   2 +
 .../ozone/s3/endpoint/TestBucketDelete.java     |   2 +-
 .../TestMultiDeleteRequestUnmarshaller.java     |   6 +-
 .../hadoop/ozone/s3/endpoint/TestObjectGet.java |   9 +-
 .../ozone/s3/endpoint/TestObjectHead.java       |  10 +-
 .../hadoop/ozone/freon/RandomKeyGenerator.java  |   2 +-
 .../freon/TestFreonWithDatanodeFastRestart.java |   4 +-
 .../apache/hadoop/ozone/freon/package-info.java |  22 +++
 .../apache/hadoop/ozone/om/TestOmSQLCli.java    |   8 +-
 .../apache/hadoop/ozone/om/package-info.java    |  22 +++
 100 files changed, 812 insertions(+), 456 deletions(-)
----------------------------------------------------------------------
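
For context, most of the changes below apply a handful of recurring ErrorProne-driven
patterns: adding missing @Override annotations, marking never-reassigned fields final,
preferring ArrayList over LinkedList (JdkObsolete), passing an explicit charset instead
of the platform default (DefaultCharset), and putting the expected value first in
assertEquals calls. A minimal, self-contained sketch of those patterns follows; the
class, names, and values are illustrative and are not part of this patch.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/** Illustrative only -- not part of this patch. */
public final class ErrorProneCleanupSketch implements AutoCloseable {

  // Fields that are never reassigned are marked final.
  private final String name = "sketch";

  // ArrayList is preferred over LinkedList (JdkObsolete).
  private final List<String> items = new ArrayList<>();

  // Methods that implement a supertype method carry @Override (MissingOverride).
  @Override
  public void close() {
    items.clear();
  }

  // An explicit charset replaces the platform default (DefaultCharset).
  public byte[] nameBytes() {
    return name.getBytes(StandardCharsets.UTF_8);
  }
}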


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 9acd832..bbd3340 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -296,6 +296,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
     // For stand alone pipeline, there is no notion called setup pipeline.
   }
 
+  @Override
   public void destroyPipeline() {
     // For stand alone pipeline, there is no notion called destroy pipeline.
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 1973c1d..b2735bc 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -170,6 +170,7 @@ public class XceiverClientManager implements Closeable {
   /**
    * Close and remove all the cached clients.
    */
+  @Override
   public void close() {
     //closing is done through RemovalListener
     clientCache.invalidateAll();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index e4b711a..dbda2e6 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -100,6 +100,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   /**
    * {@inheritDoc}
    */
+  @Override
   public void createPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
@@ -110,6 +111,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   /**
    * {@inheritDoc}
    */
+  @Override
   public void destroyPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index 21b8974..7b243d8 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -83,7 +83,7 @@ public class ChunkInputStream extends InputStream implements Seekable {
   }
 
   private void initializeChunkOffset() {
-    int tempOffset = 0;
+    long tempOffset = 0;
     for (int i = 0; i < chunks.size(); i++) {
       chunkOffset[i] = tempOffset;
       tempOffset += chunks.get(i).getLen();
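
The int-to-long change above matters because the running total of chunk lengths can pass
Integer.MAX_VALUE, at which point an int accumulator silently wraps around. A minimal
standalone sketch of that failure mode, with illustrative values (not code from this patch):

public final class OffsetOverflowSketch {
  public static void main(String[] args) {
    long chunkLen = 1L << 30;          // 1 GiB per chunk, illustrative
    int intOffset = 0;
    long longOffset = 0;
    for (int i = 0; i < 4; i++) {      // 4 GiB of chunks in total
      intOffset += chunkLen;           // compound assignment narrows and wraps
      longOffset += chunkLen;
    }
    System.out.println(intOffset);     // prints 0 -- the int total overflowed
    System.out.println(longOffset);    // prints 4294967296
  }
}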

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index ef055a1..62081f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -196,6 +196,17 @@ public final class Pipeline {
     return new Builder(pipeline);
   }
 
+  @Override
+  public String toString() {
+    return "Pipeline{" +
+        "id=" + id +
+        ", type=" + type +
+        ", factor=" + factor +
+        ", state=" + state +
+        ", nodeStatus=" + nodeStatus +
+        '}';
+  }
+
   /**
    * Builder class for Pipeline.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index 0e98a4c..1cf39b2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -68,7 +68,6 @@ public class StorageInfo {
       throws IOException {
     Preconditions.checkNotNull(type);
     Preconditions.checkNotNull(cid);
-    Preconditions.checkNotNull(cT);
     properties.setProperty(NODE_TYPE, type.name());
     properties.setProperty(CLUSTER_ID, cid);
     properties.setProperty(CREATION_TIME, String.valueOf(cT));
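
The Preconditions.checkNotNull(cT) dropped above is the kind of call ErrorProne flags as
a no-op when the argument can never be null, for example a primitive that is autoboxed at
the call site. A minimal sketch under that assumption; the method below is illustrative
and is not the StorageInfo API:

import com.google.common.base.Preconditions;

public final class PrimitiveCheckSketch {
  static void record(long creationTime) {
    // Autoboxing a primitive long never produces null, so this check always
    // passes and adds only noise; such calls can simply be removed.
    Preconditions.checkNotNull(creationTime);
    System.out.println("recorded " + creationTime);
  }

  public static void main(String[] args) {
    record(System.currentTimeMillis());
  }
}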

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
index d0644b6..24cd96d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
@@ -24,11 +24,9 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.utils.RocksDBStoreMBean;
-import org.apache.ratis.thirdparty.com.google.common.annotations.
-    VisibleForTesting;
+import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
-
 import org.rocksdb.DBOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
@@ -192,7 +190,6 @@ public class RDBStore implements DBStore {
     }
   }
 
-
   @Override
   public void move(byte[] key, byte[] value, Table source,
       Table dest) throws IOException {
@@ -226,7 +223,7 @@ public class RDBStore implements DBStore {
     } catch (RocksDBException rockdbException) {
       LOG.error("Move of key failed. Key:{}", DFSUtil.bytes2String(sourceKey));
       throw toIOException("Unable to move key: " +
-              DFSUtil.bytes2String(sourceKey), rockdbException);
+          DFSUtil.bytes2String(sourceKey), rockdbException);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
index 6044c0a..76cd39a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
@@ -37,7 +37,7 @@ public enum DummyAction implements AuditAction {
   SET_OWNER("SET_OWNER"),
   SET_QUOTA("SET_QUOTA");
 
-  private String action;
+  private final String action;
 
   DummyAction(String action) {
     this.action = action;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index bdc70fc..3887833 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -41,7 +41,7 @@ public class TestLeaseManager {
   /**
    * Dummy resource on which leases can be acquired.
    */
-  private final class DummyResource {
+  private static final class DummyResource {
 
     private final String name;
 
@@ -61,6 +61,21 @@ public class TestLeaseManager {
       }
       return false;
     }
+
+    /**
+     * Adding a toString method to fix the ErrorProne warning that this class
+     * is used in String concatenation, which would otherwise print
+     * org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@4488aabb
+     * instead of useful information.
+     *
+     * @return Name of the Dummy object.
+     */
+    @Override
+    public String toString() {
+      return "DummyResource{" +
+          "name='" + name + '\'' +
+          '}';
+    }
   }
 
   @Test
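
The warning described in the new comment above fires when an object with no toString
override ends up in string concatenation or a log/format string, so only the default
Object representation is printed. A small standalone sketch of the before/after
behaviour; the class is illustrative and not taken from this patch:

public final class ToStringSketch {
  private final String name;

  private ToStringSketch(String name) {
    this.name = name;
  }

  @Override
  public String toString() {
    return "ToStringSketch{name='" + name + "'}";
  }

  public static void main(String[] args) {
    ToStringSketch resource = new ToStringSketch("lease-1");
    // Without the override this line would print something like
    // "acquired ToStringSketch@4488aabb"; with it, the output is meaningful.
    System.out.println("acquired " + resource);
  }
}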

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java
index a341ccc..35e1b3e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestHddsIdFactory.java
@@ -55,7 +55,7 @@ public class TestHddsIdFactory {
     List<Future<Integer>> result = executor.invokeAll(tasks);
     assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size());
     for (Future<Integer> r : result) {
-      assertEquals(r.get().intValue(), IDS_PER_THREAD);
+      assertEquals(IDS_PER_THREAD, r.get().intValue());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
index a91bc80..5da8fbc 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java
@@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.hadoop.utils;
 
-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
@@ -28,9 +25,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.utils.MetadataStore.KeyValue;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,14 +47,14 @@ import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.UUID;
-
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-
 import static org.junit.runners.Parameterized.Parameters;
 
 /**
@@ -66,27 +63,24 @@ import static org.junit.runners.Parameterized.Parameters;
 @RunWith(Parameterized.class)
 public class TestMetadataStore {
 
+  private final static int MAX_GETRANGE_LENGTH = 100;
   private final String storeImpl;
-
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+  private MetadataStore store;
+  private File testDir;
   public TestMetadataStore(String metadataImpl) {
     this.storeImpl = metadataImpl;
   }
 
   @Parameters
   public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
+    return Arrays.asList(new Object[][]{
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
         {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
     });
   }
 
-  private MetadataStore store;
-  private File testDir;
-  private final static int MAX_GETRANGE_LENGTH = 100;
-
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
   @Before
   public void init() throws IOException {
     if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
@@ -109,7 +103,7 @@ public class TestMetadataStore {
     // Add 20 entries.
     // {a0 : a-value0} to {a9 : a-value9}
     // {b0 : b-value0} to {b9 : b-value9}
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       store.put(getBytes("a" + i), getBytes("a-value" + i));
       store.put(getBytes("b" + i), getBytes("b-value" + i));
     }
@@ -178,7 +172,7 @@ public class TestMetadataStore {
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
-    if(storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
+    if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
       dbType = "RocksDB";
     } else {
       dbType = "LevelDB";
@@ -241,7 +235,7 @@ public class TestMetadataStore {
 
   @Test
   public void testGetDelete() throws IOException {
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       byte[] va = store.get(getBytes("a" + i));
       assertEquals("a-value" + i, getString(va));
 
@@ -273,7 +267,7 @@ public class TestMetadataStore {
       return null;
     }
     char[] arr = key.toCharArray();
-    return new StringBuffer().append(arr[0]).append("-value")
+    return new StringBuilder().append(arr[0]).append("-value")
         .append(arr[arr.length - 1]).toString();
   }
 
@@ -326,14 +320,14 @@ public class TestMetadataStore {
       char num = value.charAt(value.length() - 1);
       // each value adds 1
       int i = Character.getNumericValue(num) + 1;
-      value =  value.substring(0, value.length() - 1) + i;
+      value = value.substring(0, value.length() - 1) + i;
       result.add(value);
       return true;
     });
 
     assertFalse(result.isEmpty());
-    for (int i=0; i<result.size(); i++) {
-      assertEquals("b-value" + (i+1), result.get(i));
+    for (int i = 0; i < result.size(); i++) {
+      assertEquals("b-value" + (i + 1), result.get(i));
     }
 
     // iterate from a non exist key
@@ -388,7 +382,7 @@ public class TestMetadataStore {
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
     assertTrue(result.stream().allMatch(entry ->
-        new String(entry.getKey()).startsWith("b")
+        new String(entry.getKey(), UTF_8).startsWith("b")
     ));
     assertEquals(20, filter1.getKeysScannedNum());
     assertEquals(10, filter1.getKeysHintedNum());
@@ -416,7 +410,7 @@ public class TestMetadataStore {
     assertEquals("b-value2", getString(result.get(0).getValue()));
 
     // If filter is null, no effect.
-    result = store.getRangeKVs(null, 1, null);
+    result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null);
     assertEquals(1, result.size());
     assertEquals("a0", getString(result.get(0).getKey()));
   }
@@ -461,7 +455,7 @@ public class TestMetadataStore {
     // If startKey is invalid, the returned list should be empty.
     List<Map.Entry<byte[], byte[]>> kvs =
         store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH);
-    assertEquals(kvs.size(), 0);
+    assertEquals(0, kvs.size());
   }
 
   @Test
@@ -504,7 +498,7 @@ public class TestMetadataStore {
         .build();
 
     List<String> expectedResult = Lists.newArrayList();
-    for (int i = 0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i));
       expectedResult.add("batch-" + i);
     }
@@ -541,43 +535,44 @@ public class TestMetadataStore {
       new KeyPrefixFilter().addFilter("b0", true).addFilter("b");
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+          "rejected"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-        "rejected"));
 
     try {
       new KeyPrefixFilter().addFilter("b0").addFilter("b", true);
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
+          "accepted"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b already " +
-        "accepted"));
 
     try {
       new KeyPrefixFilter().addFilter("b", true).addFilter("b0");
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+          "rejected"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-        "rejected"));
 
     try {
       new KeyPrefixFilter().addFilter("b").addFilter("b0", true);
     } catch (IllegalArgumentException e) {
       exception = e;
+      assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
+          "accepted"));
     }
-    assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " +
-        "accepted"));
 
     MetadataKeyFilter filter1 = new KeyPrefixFilter(true)
-            .addFilter("a0")
-            .addFilter("a1")
-            .addFilter("b", true);
+        .addFilter("a0")
+        .addFilter("a1")
+        .addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(2, result.size());
-    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a0")) && result.stream().anyMatch(entry -> new String(
-            entry.getKey()).startsWith("a1")));
+        entry.getKey(), UTF_8).startsWith("a1")));
 
     filter1 = new KeyPrefixFilter(true).addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
@@ -586,7 +581,8 @@ public class TestMetadataStore {
     filter1 = new KeyPrefixFilter().addFilter("b", true);
     result = store.getRangeKVs(null, 100, filter1);
     assertEquals(10, result.size());
-    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey())
+    assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(),
+        UTF_8)
         .startsWith("a")));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java
index db2572c..ccf19b0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java
@@ -29,12 +29,14 @@ import javax.management.MBeanServer;
 import java.io.File;
 import java.lang.management.ManagementFactory;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the JMX interface for the rocksdb metastore implementation.
  */
 public class TestRocksDBStoreMBean {
   
-  Configuration conf;
+  private Configuration conf;
   
   @Before
   public void init() throws Exception {
@@ -57,7 +59,7 @@ public class TestRocksDBStoreMBean {
             .setCreateIfMissing(true).setDbFile(testDir).build();
 
     for (int i = 0; i < 10; i++) {
-      metadataStore.put("key".getBytes(), "value".getBytes());
+      metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
     }
 
     MBeanServer platformMBeanServer =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java
index 3e1f364..47ad597 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestDBStoreBuilder.java
@@ -131,7 +131,7 @@ public class TestDBStoreBuilder {
             RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
         firstTable.put(key, value);
         byte[] temp = firstTable.get(key);
-        Arrays.equals(value, temp);
+        Assert.assertTrue(Arrays.equals(value, temp));
       }
 
       try (Table secondTable = dbStore.getTable("Second")) {
@@ -161,7 +161,7 @@ public class TestDBStoreBuilder {
             RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8);
         firstTable.put(key, value);
         byte[] temp = firstTable.get(key);
-        Arrays.equals(value, temp);
+        Assert.assertTrue(Arrays.equals(value, temp));
       }
 
       try (Table secondTable = dbStore.getTable("Second")) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
index cd25548..9524f5f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
@@ -35,9 +35,9 @@ import org.rocksdb.StatsLevel;
 import org.rocksdb.WriteBatch;
 
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
@@ -112,8 +112,8 @@ public class TestRDBTableStore {
 
   @Test
   public void delete() throws Exception {
-    List<byte[]> deletedKeys = new LinkedList<>();
-    List<byte[]> validKeys = new LinkedList<>();
+    List<byte[]> deletedKeys = new ArrayList<>();
+    List<byte[]> validKeys = new ArrayList<>();
     byte[] value =
         RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
     for (int x = 0; x < 100; x++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java
new file mode 100644
index 0000000..1fafbd3
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * DB test Utils.
+ */
+package org.apache.hadoop.utils;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index ea0e819..86865ac 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -141,8 +141,8 @@ public class BlockManagerImpl implements BlockManager {
     long bcsId = blockID.getBlockCommitSequenceId();
     Preconditions.checkNotNull(blockID,
         "BlockID cannot be null in GetBlock request");
-    Preconditions.checkNotNull(blockID.getContainerID(),
-        "Container name cannot be null");
+    Preconditions.checkNotNull(container,
+        "Container cannot be null");
 
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index c8a40b2..8149e2c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -114,7 +114,7 @@ public class GrpcReplicationClient {
       this.containerId = containerId;
       this.outputPath = outputPath;
       try {
-        outputPath = Preconditions.checkNotNull(outputPath);
+        Preconditions.checkNotNull(outputPath, "Output path cannot be null");
         Path parentPath = Preconditions.checkNotNull(outputPath.getParent());
         Files.createDirectories(parentPath);
         stream =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 55fcf26..c4b29ba 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -71,8 +71,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
       new HashMap<>();
   private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
   private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new LinkedList<>();
-  private List<SCMCommandProto> scmCommandRequests = new LinkedList<>();
+  private List<CommandStatus> cmdStatusList = new ArrayList<>();
+  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
   /**
    * Returns the number of heartbeats made to this class.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 260b158..8b84b8e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -50,7 +50,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.file.Paths;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -86,9 +86,9 @@ public class TestDatanodeStateMachine {
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    serverAddresses = new LinkedList<>();
-    scmServers = new LinkedList<>();
-    mockServers = new LinkedList<>();
+    serverAddresses = new ArrayList<>();
+    scmServers = new ArrayList<>();
+    mockServers = new ArrayList<>();
     for (int x = 0; x < scmServerCount; x++) {
       int port = SCMTestUtils.getReuseableAddress().getPort();
       String address = "127.0.0.1";
@@ -361,8 +361,8 @@ public class TestDatanodeStateMachine {
   @Test
   public void testDatanodeStateMachineWithInvalidConfiguration()
       throws Exception {
-    LinkedList<Map.Entry<String, String>> confList =
-        new LinkedList<Map.Entry<String, String>>();
+    List<Map.Entry<String, String>> confList =
+        new ArrayList<>();
     confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
 
     // Invalid ozone.scm.names

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 76632bf..35cda00 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -49,6 +49,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -160,7 +161,7 @@ public class TestHddsDispatcher {
       String datanodeId, Long containerId, Long localId) {
 
     ByteString data = ByteString.copyFrom(
-        UUID.randomUUID().toString().getBytes());
+        UUID.randomUUID().toString().getBytes(UTF_8));
     ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
         .newBuilder()
         .setChunkName(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index b632e02..03f0cd4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -61,7 +61,7 @@ public class TestReportPublisher {
   /**
    * Dummy report publisher for testing.
    */
-  private class DummyReportPublisher extends ReportPublisher {
+  private static class DummyReportPublisher extends ReportPublisher {
 
     private final long frequency;
     private int getReportCount = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 6b46762..0e58e69 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -1,19 +1,18 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
 package org.apache.hadoop.ozone.container.common.volume;
@@ -23,7 +22,6 @@ import org.apache.hadoop.fs.GetSpaceUsed;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -35,19 +33,22 @@ import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Unit tests for {@link HddsVolume}.
  */
 public class TestHddsVolume {
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static final Configuration CONF = new Configuration();
   private static final String DU_CACHE_FILE = "scmUsed";
-
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private File rootDir;
   private HddsVolume volume;
   private File versionFile;
@@ -69,9 +70,9 @@ public class TestHddsVolume {
     // clusterID is not specified and the version file should not be written
     // to disk.
     assertTrue(volume.getClusterID() == null);
-    assertEquals(volume.getStorageType(), StorageType.DEFAULT);
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NOT_FORMATTED);
+    assertEquals(StorageType.DEFAULT, volume.getStorageType());
+    assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
+        volume.getStorageState());
     assertFalse("Version file should not be created when clusterID is not " +
         "known.", versionFile.exists());
 
@@ -84,7 +85,7 @@ public class TestHddsVolume {
     assertTrue("Volume format should create Version file",
         versionFile.exists());
     assertEquals(volume.getClusterID(), CLUSTER_ID);
-    assertEquals(volume.getStorageState(), HddsVolume.VolumeState.NORMAL);
+    assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
   }
 
   @Test
@@ -111,7 +112,7 @@ public class TestHddsVolume {
   }
 
   @Test
-  public void testShutdown() throws Exception{
+  public void testShutdown() throws Exception {
     // Return dummy value > 0 for scmUsage so that scm cache file is written
     // during shutdown.
     GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
@@ -125,8 +126,7 @@ public class TestHddsVolume {
     volume.shutdown();
 
     // Volume state should be "NON_EXISTENT" when volume is shutdown.
-    assertEquals(volume.getStorageState(),
-        HddsVolume.VolumeState.NON_EXISTENT);
+    assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState());
 
     // Volume should save scmUsed cache file once volume is shutdown
     File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
@@ -139,7 +139,7 @@ public class TestHddsVolume {
       // as usage thread is shutdown.
       volume.getAvailable();
       fail("HddsVolume#shutdown test failed");
-    } catch (Exception ex){
+    } catch (Exception ex) {
       assertTrue(ex instanceof IOException);
       assertTrue(ex.getMessage().contains(
           "Volume Usage thread is not running."));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 7bb8a43..c50ec78 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,7 +68,7 @@ public class TestVolumeSet {
   }
 
   @Rule
-  public Timeout testTimeout = new Timeout(300_000);
+  public Timeout testTimeout = new Timeout(300000);
 
   @Before
   public void setup() throws Exception {
@@ -153,8 +152,7 @@ public class TestVolumeSet {
     assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed());
 
     // Failed volume should not exist in VolumeMap
-    Path volume1Path = new Path(volume1);
-    assertFalse(volumeSet.getVolumeMap().containsKey(volume1Path));
+    assertFalse(volumeSet.getVolumeMap().containsKey(volume1));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
new file mode 100644
index 0000000..3328deb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Tests for Container Volumes.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
index 6fe6d81..e3e683c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -25,10 +25,9 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Before;
@@ -37,12 +36,14 @@ import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
@@ -52,6 +53,8 @@ import static org.mockito.Mockito.mock;
  */
 public class TestBlockManagerImpl {
 
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
   private OzoneConfiguration config;
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -62,10 +65,6 @@ public class TestBlockManagerImpl {
   private BlockManagerImpl blockManager;
   private BlockID blockID;
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
   @Before
   public void setUp() throws Exception {
     config = new OzoneConfiguration();
@@ -93,7 +92,7 @@ public class TestBlockManagerImpl {
     blockData = new BlockData(blockID);
     blockData.addMetadata("VOLUME", "ozone");
     blockData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, 1024);
     chunkList.add(info.getProtoBufMessage());
@@ -124,88 +123,74 @@ public class TestBlockManagerImpl {
 
   }
 
-
   @Test
   public void testDeleteBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testDeleteBlock");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteBlock failed");
+      blockManager.getBlock(keyValueContainer, blockID);
+      fail("testDeleteBlock");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
     }
   }
 
   @Test
   public void testListBlock() throws Exception {
-    try {
+    blockManager.putBlock(keyValueContainer, blockData);
+    List<BlockData> listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 1);
+
+    for (long i = 2; i <= 10; i++) {
+      blockID = new BlockID(1L, i);
+      blockData = new BlockData(blockID);
+      blockData.addMetadata("VOLUME", "ozone");
+      blockData.addMetadata("OWNER", "hdfs");
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+      ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+          .getLocalID(), 0), 0, 1024);
+      chunkList.add(info.getProtoBufMessage());
+      blockData.setChunks(chunkList);
       blockManager.putBlock(keyValueContainer, blockData);
-      List<BlockData> listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        blockData = new BlockData(blockID);
-        blockData.addMetadata("VOLUME", "ozone");
-        blockData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        blockData.setChunks(chunkList);
-        blockManager.putBlock(keyValueContainer, blockData);
-      }
-
-      listBlockData = blockManager.listBlock(
-          keyValueContainer, 1, 10);
-      assertNotNull(listBlockData);
-      assertTrue(listBlockData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListBlock failed");
     }
+
+    listBlockData = blockManager.listBlock(
+        keyValueContainer, 1, 10);
+    assertNotNull(listBlockData);
+    assertTrue(listBlockData.size() == 10);
   }
 
   @Test
   public void testGetNoSuchBlock() throws Exception {
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+    assertEquals(1,
+        keyValueContainer.getContainerData().getKeyCount());
+    //Delete Block
+    blockManager.deleteBlock(keyValueContainer, blockID);
+    assertEquals(0,
+        keyValueContainer.getContainerData().getKeyCount());
     try {
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Put Block
-      blockManager.putBlock(keyValueContainer, blockData);
-      assertEquals(1,
-          keyValueContainer.getContainerData().getKeyCount());
-      //Delete Block
-      blockManager.deleteBlock(keyValueContainer, blockID);
-      assertEquals(0,
-          keyValueContainer.getContainerData().getKeyCount());
-      try {
-        //Since the block has been deleted, we should not be able to find it
-        blockManager.getBlock(keyValueContainer, blockID);
-        fail("testGetNoSuchBlock failed");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains(
-            "Unable to find the block", ex);
-        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-      }
-    } catch (IOException ex) {
+      //Since the block has been deleted, we should not be able to find it
+      blockManager.getBlock(keyValueContainer, blockID);
       fail("testGetNoSuchBlock failed");
+    } catch (StorageContainerException ex) {
+      GenericTestUtils.assertExceptionContains(
+          "Unable to find the block", ex);
+      assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
     }
   }
 }
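
The rewrite above drops the outer try/catch blocks that only called fail() on IOException and lets checked exceptions propagate out of the test methods. A minimal sketch of that pattern, using a hypothetical putBlock() helper rather than the real BlockManagerImpl API:

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import org.junit.Test;

public class PropagateExceptionExample {

  // Hypothetical stand-in for blockManager.putBlock(...).
  private int putBlock() throws IOException {
    return 1;
  }

  @Test
  public void testPutBlock() throws Exception {
    // No try/catch wrapper: if putBlock() throws, JUnit fails the test and
    // prints the original IOException, which is easier to debug than a
    // generic fail("testPutBlock failed") message.
    assertEquals(1, putBlock());
  }
}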

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
index 9e3edf7..3f181d1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
@@ -43,6 +43,7 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.*;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -88,7 +89,7 @@ public class TestChunkManagerImpl {
 
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
-    data = "testing write chunks".getBytes();
+    data = "testing write chunks".getBytes(UTF_8);
     // Creating BlockData
     blockID = new BlockID(1L, 1L);
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
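
The hunk above replaces the platform-default getBytes() with an explicit UTF-8 charset, the kind of implicit-charset use ErrorProne flags. A small illustrative sketch of the same pattern (not part of the patch):

import java.nio.charset.StandardCharsets;

public class CharsetExample {
  public static void main(String[] args) {
    // Explicit charset: the byte values do not depend on the JVM's
    // platform-default encoding.
    byte[] data = "testing write chunks".getBytes(StandardCharsets.UTF_8);
    String roundTrip = new String(data, StandardCharsets.UTF_8);
    System.out.println(roundTrip.equals("testing write chunks")); // prints true
  }
}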

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index fbc5ad0..5fa7b54 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -42,9 +42,9 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.UUID;
@@ -252,7 +252,7 @@ public class TestKeyValueBlockIterator {
         .randomUUID().toString());
     MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
 
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
     ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
     chunkList.add(info.getProtoBufMessage());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 8c0db4a..770a9f7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -51,12 +51,13 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.List;
-import java.util.LinkedList;
 import java.util.UUID;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.ratis.util.Preconditions.assertTrue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -135,7 +136,7 @@ public class TestKeyValueContainer {
       BlockData blockData = new BlockData(blockID);
       blockData.addMetadata("VOLUME", "ozone");
       blockData.addMetadata("OWNER", "hdfs");
-      List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, 1024);
       chunkList.add(info.getProtoBufMessage());
@@ -163,8 +164,6 @@ public class TestKeyValueContainer {
     // Check whether containerMetaDataPath and chunksPath exists or not.
     assertTrue(containerMetaDataPath != null);
     assertTrue(chunksPath != null);
-    File containerMetaDataLoc = new File(containerMetaDataPath);
-
     //Check whether container file and container db file exists or not.
     assertTrue(keyValueContainer.getContainerFile().exists(),
         ".Container File does not exist");
@@ -190,7 +189,7 @@ public class TestKeyValueContainer {
     //write one few keys to check the key count after import
     MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
     for (int i = 0; i < numberOfKeysToWrite; i++) {
-      metadataStore.put(("test" + i).getBytes(), "test".getBytes());
+      metadataStore.put(("test" + i).getBytes(UTF_8), "test".getBytes(UTF_8));
     }
     metadataStore.close();
 
@@ -247,7 +246,7 @@ public class TestKeyValueContainer {
         container.importContainerData(fis, packer);
       }
       fail("Container is imported twice. Previous files are overwritten");
-    } catch (Exception ex) {
+    } catch (IOException ex) {
       //all good
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index dcda10b..7fc065f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -226,9 +226,10 @@ public class TestKeyValueHandler {
       VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
       KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset,
           volumeSet, metrics);
-      assertEquals(keyValueHandler.getVolumeChoosingPolicyForTesting()
-          .getClass().getName(), "org.apache.hadoop.ozone.container.common" +
-          ".volume.RoundRobinVolumeChoosingPolicy");
+      assertEquals("org.apache.hadoop.ozone.container.common" +
+          ".volume.RoundRobinVolumeChoosingPolicy",
+          keyValueHandler.getVolumeChoosingPolicyForTesting()
+              .getClass().getName());
 
       //Set a class which is not of sub class of VolumeChoosingPolicy
       conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
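
The assertion above is reordered so the literal expected value comes first and the value under test second, matching JUnit's assertEquals(expected, actual) contract. A short illustrative sketch of why the order matters for failure messages:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertOrderExample {
  @Test
  public void reportsExpectedThenActual() {
    String actualPolicy = "RoundRobinVolumeChoosingPolicy";
    // JUnit treats the first argument as the expected value and the second
    // as the actual one, so a mismatch reads "expected:<...> but was:<...>".
    assertEquals("RoundRobinVolumeChoosingPolicy", actualPolicy);
  }
}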

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index a599f72..1a92ca4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -23,7 +23,6 @@ import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -45,6 +44,8 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Test the tar/untar for a given container.
  */
@@ -161,7 +162,7 @@ public class TestTarContainerPacker {
     //read the container descriptor only
     try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
      String containerYaml = new String(packer.unpackContainerDescriptor(input),
-          Charset.forName(StandardCharsets.UTF_8.name()));
+          Charset.forName(UTF_8.name()));
       Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml);
     }
 
@@ -177,7 +178,7 @@ public class TestTarContainerPacker {
     try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
       descriptor =
           new String(packer.unpackContainerData(destinationContainer, input),
-              Charset.forName(StandardCharsets.UTF_8.name()));
+              Charset.forName(UTF_8.name()));
     }
 
     assertExampleMetadataDbIsGood(
@@ -204,7 +205,7 @@ public class TestTarContainerPacker {
 
     try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) {
       List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+          .readLines(testFile, Charset.forName(UTF_8.name()));
       Assert.assertEquals(1, strings.size());
       Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0));
     }
@@ -222,7 +223,7 @@ public class TestTarContainerPacker {
 
     try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) {
       List<String> strings = IOUtils
-          .readLines(testFile, Charset.forName(StandardCharsets.UTF_8.name()));
+          .readLines(testFile, Charset.forName(UTF_8.name()));
       Assert.assertEquals(1, strings.size());
       Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0));
     }
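
The hunks above keep the Charset.forName(UTF_8.name()) wrapper from the original code. Assuming commons-io 2.3 or later is on the classpath, the charset constant could be passed to IOUtils.readLines directly; this sketch is illustrative only and not part of the patch:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.commons.io.IOUtils;

public class ReadLinesExample {
  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream(
        "test data".getBytes(StandardCharsets.UTF_8));
    // Passing the Charset constant avoids the Charset.forName(UTF_8.name())
    // round trip; assumes the readLines(InputStream, Charset) overload.
    List<String> lines = IOUtils.readLines(in, StandardCharsets.UTF_8);
    System.out.println(lines);
  }
}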

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
new file mode 100644
index 0000000..afbf274
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Chunk Manager Checks.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index 9aeab7b..1a6555c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -139,6 +139,7 @@ public class EventQueue implements EventPublisher, AutoCloseable {
    * @throws IllegalArgumentException If there is no EventHandler for
    *                                  the specific event.
    */
+  @Override
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
       EVENT_TYPE event, PAYLOAD payload) {
 
@@ -219,7 +220,9 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       try {
         Thread.sleep(100);
       } catch (InterruptedException e) {
-        e.printStackTrace();
+        LOG.warn("Interrupted exception while sleeping.", e);
+        // We ignore this exception for the time being. Review: should we
+        // propagate it back to the caller?
       }
 
       if (Time.now() > currentTime + timeout) {
@@ -229,7 +232,7 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       }
     }
   }
-
+  @Override
   public void close() {
 
     isRunning = false;
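
The hunk above replaces e.printStackTrace() with a log message and leaves an open question about propagating the InterruptedException. A common alternative, shown here only as a sketch, is to restore the thread's interrupt flag so callers can still observe the interruption:

public class InterruptExample {
  public static void main(String[] args) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      // Re-assert the interrupt flag so a caller polling
      // Thread.currentThread().isInterrupted() still sees the interruption.
      Thread.currentThread().interrupt();
    }
  }
}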

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
index b72d2ae..88ac378 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -1,24 +1,21 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
  * <p>
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.hadoop.hdds.server.events;
 
-import java.util.List;
-import java.util.Objects;
 import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.lease.LeaseManager;
@@ -27,6 +24,9 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.util.List;
+import java.util.Objects;
+
 /**
  * Test the basic functionality of event watcher.
  */
@@ -41,7 +41,7 @@ public class TestEventWatcher {
   private static final TypedEvent<ReplicationCompletedEvent>
      REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
 
-  LeaseManager<Long> leaseManager;
+  private LeaseManager<Long> leaseManager;
 
   @Before
   public void startLeaseManager() {
@@ -56,7 +56,6 @@ public class TestEventWatcher {
     DefaultMetricsSystem.shutdown();
   }
 
-
   @Test
   public void testEventHandling() throws InterruptedException {
     EventQueue queue = new EventQueue();
@@ -180,7 +179,7 @@ public class TestEventWatcher {
 
     queue.fireEvent(REPLICATION_COMPLETED, event1Completed);
 
-    Thread.sleep(2200l);
+    Thread.sleep(2200L);
 
     //until now: 3 in-progress activities are tracked with three
     // UnderreplicatedEvents. The first one is completed, the remaining two
@@ -201,27 +200,29 @@ public class TestEventWatcher {
   }
 
   private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
-  createEventWatcher() {
+      createEventWatcher() {
     return new CommandWatcherExample(WATCH_UNDER_REPLICATED,
         REPLICATION_COMPLETED, leaseManager);
   }
 
-  private class CommandWatcherExample
+  private static class CommandWatcherExample
       extends EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent> {
 
-    public CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
+    CommandWatcherExample(Event<UnderreplicatedEvent> startEvent,
         Event<ReplicationCompletedEvent> completionEvent,
         LeaseManager<Long> leaseManager) {
       super("TestCommandWatcher", startEvent, completionEvent, leaseManager);
     }
 
     @Override
-    protected void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onTimeout(EventPublisher publisher,
+        UnderreplicatedEvent payload) {
       publisher.fireEvent(UNDER_REPLICATED, payload);
     }
 
     @Override
-    protected void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+    protected void onFinished(EventPublisher publisher,
+        UnderreplicatedEvent payload) {
       //Good job. We did it.
     }
 
@@ -240,13 +241,14 @@ public class TestEventWatcher {
 
     private final String datanodeId;
 
-    public ReplicationCompletedEvent(long id, String containerId,
+    ReplicationCompletedEvent(long id, String containerId,
         String datanodeId) {
       this.id = id;
       this.containerId = containerId;
       this.datanodeId = datanodeId;
     }
 
+    @Override
     public long getId() {
       return id;
     }
@@ -279,11 +281,12 @@ public class TestEventWatcher {
 
     private final String containerId;
 
-    public UnderreplicatedEvent(long id, String containerId) {
+    UnderreplicatedEvent(long id, String containerId) {
       this.containerId = containerId;
       this.id = id;
     }
 
+    @Override
     public long getId() {
       return id;
     }
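
The change above makes CommandWatcherExample a static nested class and narrows constructor visibility. A small sketch, with hypothetical class names, of why a static nested class is preferred when the enclosing instance is never used:

public class OuterExample {

  // A non-static inner class keeps a hidden reference to OuterExample.this.
  class Inner { }

  // A static nested class does not, so it cannot leak the outer instance.
  static class Nested { }

  public static void main(String[] args) {
    OuterExample outer = new OuterExample();
    Inner inner = outer.new Inner();   // requires an enclosing instance
    Nested nested = new Nested();      // does not
    System.out.println(inner + " " + nested);
  }
}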

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
new file mode 100644
index 0000000..720dd6f
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+/**
+ * Tests for Event Watcher.
+ */
+package org.apache.hadoop.hdds.server.events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
index 904762d..ee64c48 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
@@ -19,9 +19,12 @@ package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 
+/**
+ * Pending Deletes in the block space.
+ */
 public class PendingDeleteStatusList {
 
   private List<PendingDeleteStatus> pendingDeleteStatuses;
@@ -29,7 +32,7 @@ public class PendingDeleteStatusList {
 
   public PendingDeleteStatusList(DatanodeDetails datanodeDetails) {
     this.datanodeDetails = datanodeDetails;
-    pendingDeleteStatuses = new LinkedList<>();
+    pendingDeleteStatuses = new ArrayList<>();
   }
 
   public void addPendingDeleteStatus(long dnDeleteTransactionId,
@@ -39,6 +42,9 @@ public class PendingDeleteStatusList {
             containerId));
   }
 
+  /**
+   * Status of pending deletes.
+   */
   public static class PendingDeleteStatus {
     private long dnDeleteTransactionId;
     private long scmDeleteTransactionId;
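
This file, like several others in the patch, swaps LinkedList for ArrayList where elements are only appended and iterated. A brief illustrative sketch of that usage (names are hypothetical):

import java.util.ArrayList;
import java.util.List;

public class ListChoiceExample {
  public static void main(String[] args) {
    List<String> chunks = new ArrayList<>();
    chunks.add("1.data.0");
    chunks.add("2.data.0");
    // Append-then-iterate usage gains nothing from LinkedList's node
    // structure; ArrayList is the simpler, cache-friendly default.
    for (String chunk : chunks) {
      System.out.println(chunk);
    }
  }
}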

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java
index 0ed06dd..c6367c2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/ChillModePrecheck.java
@@ -44,6 +44,7 @@ public class ChillModePrecheck implements Precheck<ScmOps> {
     }
   }
 
+  @Override
   public boolean check(ScmOps op) throws SCMException {
     if (inChillMode.get() && ChillModeRestrictedOps
         .isRestrictedInChillMode(op)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
index 9445fe8..8bfcb84 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
@@ -134,6 +134,16 @@ public final class ContainerReplica implements Comparable<ContainerReplica> {
     return new ContainerReplicaBuilder();
   }
 
+  @Override
+  public String toString() {
+    return "ContainerReplica{" +
+        "containerID=" + containerID +
+        ", datanodeDetails=" + datanodeDetails +
+        ", placeOfBirth=" + placeOfBirth +
+        ", sequenceId=" + sequenceId +
+        '}';
+  }
+
   /**
    * Used for building ContainerReplica instance.
    */
@@ -148,12 +158,12 @@ public final class ContainerReplica implements Comparable<ContainerReplica> {
     /**
      * Set Container Id.
      *
-     * @param containerId ContainerID
+     * @param cID ContainerID
      * @return ContainerReplicaBuilder
      */
     public ContainerReplicaBuilder setContainerID(
-        final ContainerID containerId) {
-      containerID = containerId;
+        final ContainerID cID) {
+      this.containerID = cID;
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
index 60861b7..9fc47ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.stream.Collectors;
@@ -102,7 +102,7 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
    * @return list of datanodes chosen.
    * @throws SCMException SCM exception.
    */
-
+  @Override
   public List<DatanodeDetails> chooseDatanodes(
       List<DatanodeDetails> excludedNodes,
       int nodesRequired, final long sizeRequired) throws SCMException {
@@ -167,7 +167,7 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy {
   public List<DatanodeDetails> getResultSet(
       int nodesRequired, List<DatanodeDetails> healthyNodes)
       throws SCMException {
-    List<DatanodeDetails> results = new LinkedList<>();
+    List<DatanodeDetails> results = new ArrayList<>();
     for (int x = 0; x < nodesRequired; x++) {
       // invoke the choose function defined in the derived classes.
       DatanodeDetails nodeId = chooseNode(healthyNodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
index 76702d5..a70f633 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
@@ -83,6 +83,7 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy
    * @param healthyNodes - all healthy datanodes.
   * @return one randomly chosen datanode that from two randomly chosen datanode
    */
+  @Override
   public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
     DatanodeDetails selectedNode =
         healthyNodes.get(getRand().nextInt(healthyNodes.size()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a16aa2f6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
index a6e732c..5305942 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
  * DatanodeMetric acts as the basis for all the metric that is used in
  * comparing 2 datanodes.
  */
-public interface DatanodeMetric<T, S> extends Comparable<T> {
+public interface DatanodeMetric<T, S>  {
 
   /**
    * Some syntactic sugar over Comparable interface. This makes code easier to
@@ -87,5 +87,4 @@ public interface DatanodeMetric<T, S> extends Comparable<T> {
    */
   void subtract(T value);
 
-
 }

