This is an automated email from the ASF dual-hosted git repository.

nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit 9e0dd848d7a363a64733518335fbfb5e8d47a20f
Author: Nandakumar <na...@apache.org>
AuthorDate: Sat Oct 24 21:08:33 2020 +0530

    HDDS-3895. Implement container related operations in ContainerManagerImpl.
---
 .../hadoop/hdds/scm/container/ContainerID.java     |  32 +++-
 .../hadoop/hdds/scm/container/ContainerInfo.java   |   7 +-
 .../scm/container/common/helpers/ExcludeList.java  |   2 +-
 .../interface-client/src/main/proto/hdds.proto     |   7 +
 .../block/DatanodeDeletedBlockTransactions.java    |   2 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |   2 +-
 .../container/AbstractContainerReportHandler.java  |   2 +-
 .../scm/container/ContainerActionsHandler.java     |   2 +-
 .../hdds/scm/container/ContainerManagerImpl.java   | 175 +++++++++----------
 .../hdds/scm/container/ContainerManagerV2.java     |  65 +++----
 .../hdds/scm/container/ContainerReportHandler.java |   6 +-
 .../hdds/scm/container/ContainerStateManager.java  |  36 ++--
 .../scm/container/ContainerStateManagerImpl.java   | 194 ++++++++++++---------
 .../scm/container/ContainerStateManagerV2.java     |  40 ++++-
 .../IncrementalContainerReportHandler.java         |   2 +-
 .../hdds/scm/container/SCMContainerManager.java    |  13 +-
 .../scm/container/states/ContainerAttribute.java   |   2 +-
 .../scm/container/states/ContainerStateMap.java    | 172 +++++++++---------
 .../hadoop/hdds/scm/metadata/ContainerIDCodec.java |   4 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  20 +--
 .../hdds/scm/server/StorageContainerManager.java   |   2 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   7 +-
 .../container/TestCloseContainerEventHandler.java  |   4 +-
 .../scm/container/TestContainerActionsHandler.java |   2 +-
 .../scm/container/TestContainerManagerImpl.java    |  30 +++-
 .../scm/container/TestSCMContainerManager.java     |   2 +-
 .../container/states/TestContainerAttribute.java   |  18 +-
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  10 +-
 .../scm/node/states/TestNode2ContainerMap.java     |  10 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |   4 +-
 .../scm/pipeline/TestPipelineStateManager.java     |  22 +--
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |   4 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java    |   2 +-
 .../TestContainerStateManagerIntegration.java      |   8 +-
 .../metrics/TestSCMContainerManagerMetrics.java    |   6 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java    |  10 +-
 .../rpc/TestContainerReplicationEndToEnd.java      |   6 +-
 .../client/rpc/TestFailureHandlingByClient.java    |  10 +-
 .../rpc/TestFailureHandlingByClientFlushDelay.java |   2 +-
 .../rpc/TestMultiBlockWritesWithDnFailures.java    |   4 +-
 .../rpc/TestOzoneClientRetriesOnException.java     |   4 +-
 ...estOzoneClientRetriesOnExceptionFlushDelay.java |   2 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |   2 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   2 +-
 .../apache/hadoop/ozone/container/TestHelper.java  |   6 +-
 .../TestCloseContainerByPipeline.java              |   8 +-
 .../commandhandler/TestCloseContainerHandler.java  |   2 +-
 .../commandhandler/TestDeleteContainerHandler.java |   2 +-
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   2 +-
 .../ozone/om/TestContainerReportWithKeys.java      |   2 +-
 .../hadoop/ozone/recon/TestReconAsPassiveScm.java  |   2 +-
 .../org/apache/hadoop/ozone/scm/TestSCMMXBean.java |  12 +-
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |   4 +-
 .../ozone/recon/fsck/ContainerHealthTask.java      |   2 +-
 .../ozone/recon/scm/ReconContainerManager.java     |   2 +-
 .../recon/scm/ReconContainerReportHandler.java     |   2 +-
 .../ReconIncrementalContainerReportHandler.java    |   2 +-
 .../ozone/recon/api/TestContainerEndpoint.java     |   2 +-
 .../recon/fsck/TestContainerHealthStatus.java      |   2 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |  24 +--
 .../TestContainerHealthTaskRecordGenerator.java    |   2 +-
 .../scm/AbstractReconContainerManagerTest.java     |   6 +-
 .../ozone/recon/scm/TestReconContainerManager.java |   2 +-
 ...TestReconIncrementalContainerReportHandler.java |   2 +-
 64 files changed, 563 insertions(+), 483 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index bb44da4..1a6be96 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -23,6 +23,7 @@ import com.google.common.primitives.Longs;
 import org.apache.commons.lang3.builder.CompareToBuilder;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 /**
  * Container ID is an integer that is a value between 1..MAX_CONTAINER ID.
@@ -34,13 +35,14 @@ public final class ContainerID implements 
Comparable<ContainerID> {
 
   private final long id;
 
-  // TODO: make this private.
   /**
    * Constructs ContainerID.
    *
    * @param id int
    */
-  public ContainerID(long id) {
+  private ContainerID(long id) {
+    Preconditions.checkState(id > 0,
+        "Container ID should be a positive long. %s.", id);
     this.id = id;
   }
 
@@ -49,9 +51,7 @@ public final class ContainerID implements 
Comparable<ContainerID> {
    * @param containerID  long
    * @return ContainerID.
    */
-  public static ContainerID valueof(final long containerID) {
-    Preconditions.checkState(containerID > 0,
-        "Container ID should be a positive long. "+ containerID);
+  public static ContainerID valueOf(final long containerID) {
     return new ContainerID(containerID);
   }
 
@@ -60,14 +60,30 @@ public final class ContainerID implements 
Comparable<ContainerID> {
    *
    * @return int
    */
+  @Deprecated
+  /*
+   * Don't expose the int value.
+   */
   public long getId() {
     return id;
   }
 
+  /**
+   * Use proto message.
+   */
+  @Deprecated
   public byte[] getBytes() {
     return Longs.toByteArray(id);
   }
 
+  public HddsProtos.ContainerID getProtobuf() {
+    return HddsProtos.ContainerID.newBuilder().setId(id).build();
+  }
+
+  public static ContainerID getFromProtobuf(HddsProtos.ContainerID proto) {
+    return ContainerID.valueOf(proto.getId());
+  }
+
   @Override
   public boolean equals(final Object o) {
     if (this == o) {
@@ -81,14 +97,14 @@ public final class ContainerID implements 
Comparable<ContainerID> {
     final ContainerID that = (ContainerID) o;
 
     return new EqualsBuilder()
-        .append(getId(), that.getId())
+        .append(id, that.id)
         .isEquals();
   }
 
   @Override
   public int hashCode() {
     return new HashCodeBuilder(61, 71)
-        .append(getId())
+        .append(id)
         .toHashCode();
   }
 
@@ -96,7 +112,7 @@ public final class ContainerID implements 
Comparable<ContainerID> {
   public int compareTo(final ContainerID that) {
     Preconditions.checkNotNull(that);
     return new CompareToBuilder()
-        .append(this.getId(), that.getId())
+        .append(this.id, that.id)
         .build();
   }
 
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index b8f1a92..e621a4f 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -121,6 +121,11 @@ public class ContainerInfo implements 
Comparator<ContainerInfo>,
         .build();
   }
 
+  /**
+   * This method is deprecated, use {@code containerID()} which returns
+   * a {@link ContainerID} object.
+   */
+  @Deprecated
   public long getContainerID() {
     return containerID;
   }
@@ -179,7 +184,7 @@ public class ContainerInfo implements 
Comparator<ContainerInfo>,
   }
 
   public ContainerID containerID() {
-    return new ContainerID(getContainerID());
+    return ContainerID.valueOf(containerID);
   }
 
   /**
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
index 803aa03..824a1f5 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
@@ -91,7 +91,7 @@ public class ExcludeList {
       HddsProtos.ExcludeListProto excludeListProto) {
     ExcludeList excludeList = new ExcludeList();
     excludeListProto.getContainerIdsList().forEach(id -> {
-      excludeList.addConatinerId(ContainerID.valueof(id));
+      excludeList.addConatinerId(ContainerID.valueOf(id));
     });
     DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
     excludeListProto.getDatanodesList().forEach(dn -> {
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto 
b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index b43a74c..d89e7b4 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -83,6 +83,10 @@ message PipelineID {
   optional UUID uuid128 = 100;
 }
 
+message ContainerID {
+    required uint64 id = 1;
+}
+
 enum PipelineState {
     PIPELINE_ALLOCATED = 1;
     PIPELINE_OPEN = 2;
@@ -181,6 +185,7 @@ enum LifeCycleEvent {
 }
 
 message ContainerInfoProto {
+    // Replace int64 with ContainerID message
     required int64 containerID = 1;
     required LifeCycleState state = 2;
     optional PipelineID pipelineID = 3;
@@ -236,6 +241,7 @@ enum ScmOps {
 
 message ExcludeListProto {
     repeated string datanodes = 1;
+    // Replace int64 with ContainerID message
     repeated int64 containerIds = 2;
     repeated PipelineID pipelineIds = 3;
 }
@@ -244,6 +250,7 @@ message ExcludeListProto {
  * Block ID that uniquely identify a block by SCM.
  */
 message ContainerBlockID {
+    // Replace int64 with ContainerID message
     required int64 containerID = 1;
     required int64 localID = 2;
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index dca1529..2420d61 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -59,7 +59,7 @@ public class DatanodeDeletedBlockTransactions {
       Set<UUID> dnsWithTransactionCommitted) {
     try {
       boolean success = false;
-      final ContainerID id = ContainerID.valueof(tx.getContainerID());
+      final ContainerID id = ContainerID.valueOf(tx.getContainerID());
       final ContainerInfo container = containerManager.getContainer(id);
       final Set<ContainerReplica> replicas = containerManager
           .getContainerReplicas(id);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index edd3d4a..5d43a75 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -193,7 +193,7 @@ public class DeletedBlockLogImpl
           long txID = transactionResult.getTxID();
           // set of dns which have successfully committed transaction txId.
           dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
-          final ContainerID containerId = ContainerID.valueof(
+          final ContainerID containerId = ContainerID.valueOf(
               transactionResult.getContainerID());
           if (dnsWithCommittedTxn == null) {
             // Mostly likely it's a retried delete command response.
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index 1b190a2..02dc3f5 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -75,7 +75,7 @@ public class AbstractContainerReportHandler {
       final ContainerReplicaProto replicaProto, final EventPublisher publisher)
       throws IOException {
     final ContainerID containerId = ContainerID
-        .valueof(replicaProto.getContainerID());
+        .valueOf(replicaProto.getContainerID());
 
     if (logger.isDebugEnabled()) {
       logger.debug("Processing replica of container {} from datanode {}",
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
index e79f268..3d53e29 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
@@ -45,7 +45,7 @@ public class ContainerActionsHandler implements
     DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails();
     for (ContainerAction action : containerReportFromDatanode.getReport()
         .getContainerActionsList()) {
-      ContainerID containerId = ContainerID.valueof(action.getContainerID());
+      ContainerID containerId = ContainerID.valueOf(action.getContainerID());
       switch (action.getAction()) {
       case CLOSE:
         if (LOG.isDebugEnabled()) {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 36b9a30..3477eea 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -23,12 +23,14 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
@@ -37,7 +39,9 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.hdds.utils.db.Table;
+import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,6 +64,7 @@ public class ContainerManagerImpl implements 
ContainerManagerV2 {
   /**
    *
    */
+  //Can we move this lock to ContainerStateManager?
   private final ReadWriteLock lock;
 
   /**
@@ -93,94 +98,45 @@ public class ContainerManagerImpl implements 
ContainerManagerV2 {
   }
 
   @Override
-  public Set<ContainerID> getContainerIDs() {
-    lock.readLock().lock();
-    try {
-      return containerStateManager.getContainerIDs();
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public Set<ContainerInfo> getContainers() {
-    lock.readLock().lock();
-    try {
-      return containerStateManager.getContainerIDs().stream().map(id -> {
-        try {
-          return containerStateManager.getContainer(id);
-        } catch (ContainerNotFoundException e) {
-          // How can this happen? o_O
-          return null;
-        }
-      }).filter(Objects::nonNull).collect(Collectors.toSet());
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public ContainerInfo getContainer(final ContainerID containerID)
+  public ContainerInfo getContainer(final ContainerID id)
       throws ContainerNotFoundException {
     lock.readLock().lock();
     try {
-      return containerStateManager.getContainer(containerID);
-    } finally {
-      lock.readLock().unlock();
-    }
-  }
-
-  @Override
-  public Set<ContainerInfo> getContainers(final LifeCycleState state) {
-    lock.readLock().lock();
-    try {
-      return containerStateManager.getContainerIDs(state).stream().map(id -> {
-        try {
-          return containerStateManager.getContainer(id);
-        } catch (ContainerNotFoundException e) {
-          // How can this happen? o_O
-          return null;
-        }
-      }).filter(Objects::nonNull).collect(Collectors.toSet());
+      return Optional.ofNullable(containerStateManager
+          .getContainer(id.getProtobuf()))
+          .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
     } finally {
       lock.readLock().unlock();
     }
   }
 
   @Override
-  public boolean exists(final ContainerID containerID) {
+  public List<ContainerInfo> listContainers(final ContainerID startID,
+                                            final int count) {
     lock.readLock().lock();
     try {
-      return (containerStateManager.getContainer(containerID) != null);
-    } catch (ContainerNotFoundException ex) {
-      return false;
+      final long start = startID == null ? 0 : startID.getId();
+      final List<ContainerID> containersIds =
+          new ArrayList<>(containerStateManager.getContainerIDs());
+      Collections.sort(containersIds);
+      return containersIds.stream()
+          .filter(id -> id.getId() > start).limit(count)
+          .map(ContainerID::getProtobuf)
+          .map(containerStateManager::getContainer)
+          .collect(Collectors.toList());
     } finally {
       lock.readLock().unlock();
     }
   }
 
   @Override
-  public List<ContainerInfo> listContainers(final ContainerID startID,
-                                            final int count) {
+  public List<ContainerInfo> listContainers(final LifeCycleState state) {
     lock.readLock().lock();
     try {
-      final long startId = startID == null ? 0 : startID.getId();
-      final List<ContainerID> containersIds =
-          new ArrayList<>(containerStateManager.getContainerIDs());
-      Collections.sort(containersIds);
-      return containersIds.stream()
-          .filter(id -> id.getId() > startId)
-          .limit(count)
-          .map(id -> {
-            try {
-              return containerStateManager.getContainer(id);
-            } catch (ContainerNotFoundException ex) {
-              // This can never happen, as we hold lock no one else can remove
-              // the container after we got the container ids.
-              LOG.warn("Container Missing.", ex);
-              return null;
-            }
-          }).collect(Collectors.toList());
+      return containerStateManager.getContainerIDs(state).stream()
+          .map(ContainerID::getProtobuf)
+          .map(containerStateManager::getContainer)
+          .filter(Objects::nonNull).collect(Collectors.toList());
     } finally {
       lock.readLock().unlock();
     }
@@ -201,8 +157,8 @@ public class ContainerManagerImpl implements 
ContainerManagerV2 {
             replicationFactor + ", State:PipelineState.OPEN");
       }
 
-      final ContainerID containerID = containerStateManager
-          .getNextContainerID();
+      // TODO: Replace this with Distributed unique id generator.
+      final ContainerID containerID = ContainerID.valueOf(UniqueId.next());
       final Pipeline pipeline = pipelines.get(
           (int) containerID.getId() % pipelines.size());
 
@@ -222,43 +178,65 @@ public class ContainerManagerImpl implements 
ContainerManagerV2 {
       if (LOG.isTraceEnabled()) {
         LOG.trace("New container allocated: {}", containerInfo);
       }
-      return containerStateManager.getContainer(containerID);
+      return containerStateManager.getContainer(containerID.getProtobuf());
     } finally {
       lock.writeLock().unlock();
     }
   }
 
   @Override
-  public void deleteContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
-  }
-
-  @Override
-  public void updateContainerState(final ContainerID containerID,
+  public void updateContainerState(final ContainerID id,
                                    final LifeCycleEvent event)
-      throws ContainerNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+      throws IOException, InvalidStateTransitionException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.writeLock().lock();
+    try {
+      checkIfContainerExist(cid);
+      containerStateManager.updateContainerState(cid, event);
+    } finally {
+      lock.writeLock().unlock();
+    }
   }
 
   @Override
-  public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+  public Set<ContainerReplica> getContainerReplicas(final ContainerID id)
+      throws ContainerNotFoundException {
+    lock.readLock().lock();
+    try {
+      return Optional.ofNullable(containerStateManager
+          .getContainerReplicas(id.getProtobuf()))
+          .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
+    } finally {
+      lock.readLock().unlock();
+    }
   }
 
   @Override
-  public void updateContainerReplica(final ContainerID containerID,
+  public void updateContainerReplica(final ContainerID id,
                                      final ContainerReplica replica)
       throws ContainerNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.writeLock().lock();
+    try {
+      checkIfContainerExist(cid);
+      containerStateManager.updateContainerReplica(cid, replica);
+    } finally {
+      lock.writeLock().unlock();
+    }
   }
 
   @Override
-  public void removeContainerReplica(final ContainerID containerID,
+  public void removeContainerReplica(final ContainerID id,
                                      final ContainerReplica replica)
       throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.writeLock().lock();
+    try {
+      checkIfContainerExist(cid);
+      containerStateManager.removeContainerReplica(cid, replica);
+    } finally {
+      lock.writeLock().unlock();
+    }
   }
 
   @Override
@@ -280,6 +258,27 @@ public class ContainerManagerImpl implements 
ContainerManagerV2 {
   }
 
   @Override
+  public void deleteContainer(final ContainerID id)
+      throws IOException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.writeLock().lock();
+    try {
+      checkIfContainerExist(cid);
+      containerStateManager.removeContainer(cid);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  private void checkIfContainerExist(final HddsProtos.ContainerID id)
+      throws ContainerNotFoundException {
+    if (!containerStateManager.contains(id)) {
+      throw new ContainerNotFoundException("Container with id #" +
+          id.getId() + " not found.");
+    }
+  }
+
+  @Override
   public void close() throws Exception {
     containerStateManager.close();
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
index 863ca4d..dcedb6c 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -27,6 +26,7 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 
 /**
  * TODO: Add extensive javadoc.
@@ -38,26 +38,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 public interface ContainerManagerV2 extends AutoCloseable {
   // TODO: Rename this to ContainerManager
 
-  /**
-   * Returns all the container Ids managed by ContainerManager.
-   *
-   * @return Set of ContainerID
-   */
-  Set<ContainerID> getContainerIDs();
-
-  /**
-   * Returns all the containers managed by ContainerManager.
-   *
-   * @return List of ContainerInfo
-   */
-  Set<ContainerInfo> getContainers();
-
-  /**
-   * Returns all the containers which are in the specified state.
-   *
-   * @return List of ContainerInfo
-   */
-  Set<ContainerInfo> getContainers(LifeCycleState state);
 
   /**
    * Returns the ContainerInfo from the container ID.
@@ -66,8 +46,6 @@ public interface ContainerManagerV2 extends AutoCloseable {
   ContainerInfo getContainer(ContainerID containerID)
       throws ContainerNotFoundException;
 
-  boolean exists(ContainerID containerID);
-
   /**
    * Returns containers under certain conditions.
    * Search container IDs from start ID(exclusive),
@@ -84,6 +62,14 @@ public interface ContainerManagerV2 extends AutoCloseable {
    */
   List<ContainerInfo> listContainers(ContainerID startID, int count);
 
+
+  /**
+   * Returns all the containers which are in the specified state.
+   *
+   * @return List of ContainerInfo
+   */
+  List<ContainerInfo> listContainers(LifeCycleState state);
+
   /**
    * Allocates a new container for a given keyName and replication factor.
    *
@@ -97,23 +83,15 @@ public interface ContainerManagerV2 extends AutoCloseable {
                                   String owner) throws IOException;
 
   /**
-   * Deletes a container from SCM.
-   *
-   * @param containerID - Container ID
-   * @throws IOException
-   */
-  void deleteContainer(ContainerID containerID)
-      throws ContainerNotFoundException;
-
-  /**
    * Update container state.
    * @param containerID - Container ID
    * @param event - container life cycle event
    * @throws IOException
+   * @throws InvalidStateTransitionException
    */
   void updateContainerState(ContainerID containerID,
                             LifeCycleEvent event)
-      throws ContainerNotFoundException;
+      throws IOException, InvalidStateTransitionException;
 
   /**
    * Returns the latest list of replicas for given containerId.
@@ -157,18 +135,6 @@ public interface ContainerManagerV2 extends AutoCloseable {
    * Returns ContainerInfo which matches the requirements.
    * @param size - the amount of space required in the container
    * @param owner - the user which requires space in its owned container
-   * @param pipeline - pipeline to which the container should belong
-   * @return ContainerInfo for the matching container.
-   */
-  default ContainerInfo getMatchingContainer(long size, String owner,
-                                     Pipeline pipeline) {
-    return getMatchingContainer(size, owner, pipeline, 
Collections.emptyList());
-  }
-
-  /**
-   * Returns ContainerInfo which matches the requirements.
-   * @param size - the amount of space required in the container
-   * @param owner - the user which requires space in its owned container
    * @param pipeline - pipeline to which the container should belong.
    * @param excludedContainerIDS - containerIds to be excluded.
    * @return ContainerInfo for the matching container.
@@ -185,4 +151,13 @@ public interface ContainerManagerV2 extends AutoCloseable {
    */
   // Is it possible to remove this from the Interface?
   void notifyContainerReportProcessing(boolean isFullReport, boolean success);
+
+  /**
+   * Deletes a container from SCM.
+   *
+   * @param containerID - Container ID
+   * @throws IOException
+   */
+  void deleteContainer(ContainerID containerID)
+      throws IOException;
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 7bca64f..18dffe7 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -120,7 +120,7 @@ public class ContainerReportHandler extends 
AbstractContainerReportHandler
 
       final Set<ContainerID> containersInDn = replicas.parallelStream()
           .map(ContainerReplicaProto::getContainerID)
-          .map(ContainerID::valueof).collect(Collectors.toSet());
+          .map(ContainerID::valueOf).collect(Collectors.toSet());
 
       final Set<ContainerID> missingReplicas = new HashSet<>(containersInSCM);
       missingReplicas.removeAll(containersInDn);
@@ -167,7 +167,7 @@ public class ContainerReportHandler extends 
AbstractContainerReportHandler
         } else if (unknownContainerHandleAction.equals(
             UNKNOWN_CONTAINER_ACTION_DELETE)) {
           final ContainerID containerId = ContainerID
-              .valueof(replicaProto.getContainerID());
+              .valueOf(replicaProto.getContainerID());
           deleteReplica(containerId, datanodeDetails, publisher, "unknown");
         }
       } catch (IOException e) {
@@ -221,7 +221,7 @@ public class ContainerReportHandler extends 
AbstractContainerReportHandler
     for (ContainerReplicaProto replica : replicas) {
       try {
         final ContainerInfo containerInfo = containerManager.getContainer(
-            ContainerID.valueof(replica.getContainerID()));
+            ContainerID.valueOf(replica.getContainerID()));
         if (containerInfo.getDeleteTransactionId() >
             replica.getDeleteTransactionId()) {
           pendingDeleteStatusList.addPendingDeleteStatus(
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index e575c60..0c3772f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -329,7 +329,7 @@ public class ContainerStateManager {
       // In Recon, while adding a 'new' CLOSED container, pipeline will be a
       // random ID, and hence be passed down as null.
       pipelineManager.addContainerToPipeline(pipeline.getId(),
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
     }
     containerStateCount.incrementAndGet(containerInfo.getState());
   }
@@ -371,12 +371,8 @@ public class ContainerStateManager {
   void updateDeleteTransactionId(
       final Map<Long, Long> deleteTransactionMap) {
     deleteTransactionMap.forEach((k, v) -> {
-      try {
-        containers.getContainerInfo(ContainerID.valueof(k))
-            .updateDeleteTransactionId(v);
-      } catch (ContainerNotFoundException e) {
-        LOG.warn("Exception while updating delete transaction id.", e);
-      }
+      containers.getContainerInfo(ContainerID.valueOf(k))
+          .updateDeleteTransactionId(v);
     });
   }
 
@@ -432,18 +428,13 @@ public class ContainerStateManager {
   private ContainerInfo findContainerWithSpace(final long size,
       final NavigableSet<ContainerID> searchSet, final String owner,
       final PipelineID pipelineID) {
-    try {
-      // Get the container with space to meet our request.
-      for (ContainerID id : searchSet) {
-        final ContainerInfo containerInfo = containers.getContainerInfo(id);
-        if (containerInfo.getUsedBytes() + size <= this.containerSize) {
-          containerInfo.updateLastUsedTime();
-          return containerInfo;
-        }
+    // Get the container with space to meet our request.
+    for (ContainerID id : searchSet) {
+      final ContainerInfo containerInfo = containers.getContainerInfo(id);
+      if (containerInfo.getUsedBytes() + size <= this.containerSize) {
+        containerInfo.updateLastUsedTime();
+        return containerInfo;
       }
-    } catch (ContainerNotFoundException e) {
-      // This should not happen!
-      LOG.warn("Exception while finding container with space", e);
     }
     return null;
   }
@@ -496,7 +487,11 @@ public class ContainerStateManager {
    */
   ContainerInfo getContainer(final ContainerID containerID)
       throws ContainerNotFoundException {
-    return containers.getContainerInfo(containerID);
+    final ContainerInfo container = containers.getContainerInfo(containerID);
+    if (container != null) {
+      return container;
+    }
+    throw new ContainerNotFoundException(containerID.toString());
   }
 
   void close() throws IOException {
@@ -540,6 +535,9 @@ public class ContainerStateManager {
 
   void removeContainer(final ContainerID containerID)
       throws ContainerNotFoundException {
+    if (containers.getContainerInfo(containerID) == null) {
+      throw new ContainerNotFoundException(containerID.toString());
+    }
     containers.removeContainer(containerID);
   }
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
index 4f4456a..7f42a97 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -24,12 +24,12 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -47,15 +47,32 @@ import 
org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
+import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.QUASI_CLOSE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLOSE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FORCE_CLOSE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.DELETE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLEANUP;
+
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED;
+
 /**
  * Default implementation of ContainerStateManager. This implementation
  * holds the Container States in-memory which is backed by a persistent store.
  * The persistent store is always kept in sync with the in-memory state 
changes.
+ *
+ * This class is NOT thread-safe. All the calls are idempotent.
  */
 public final class ContainerStateManagerImpl
     implements ContainerStateManagerV2 {
@@ -72,13 +89,6 @@ public final class ContainerStateManagerImpl
   private final long containerSize;
 
   /**
-   * The container ID sequence which is used to create new container.
-   * This will be removed once we have a Distributed Sequence ID Generator.
-   */
-  @Deprecated
-  private final AtomicLong nextContainerID;
-
-  /**
    * In-memory representation of Container States.
    */
   private final ContainerStateMap containers;
@@ -121,7 +131,6 @@ public final class ContainerStateManagerImpl
     this.containerStore = containerStore;
     this.stateMachine = newStateMachine();
     this.containerSize = getConfiguredContainerSize(conf);
-    this.nextContainerID = new AtomicLong(1L);
     this.containers = new ContainerStateMap();
     this.lastUsedMap = new ConcurrentHashMap<>();
 
@@ -138,40 +147,45 @@ public final class ContainerStateManagerImpl
     final Set<LifeCycleState> finalStates = new HashSet<>();
 
     // These are the steady states of a container.
-    finalStates.add(LifeCycleState.OPEN);
-    finalStates.add(LifeCycleState.CLOSED);
-    finalStates.add(LifeCycleState.DELETED);
+    finalStates.add(CLOSED);
+    finalStates.add(DELETED);
 
     final StateMachine<LifeCycleState, LifeCycleEvent> containerLifecycleSM =
-        new StateMachine<>(LifeCycleState.OPEN, finalStates);
-
-    containerLifecycleSM.addTransition(LifeCycleState.OPEN,
-        LifeCycleState.CLOSING,
-        LifeCycleEvent.FINALIZE);
+        new StateMachine<>(OPEN, finalStates);
 
-    containerLifecycleSM.addTransition(LifeCycleState.CLOSING,
-        LifeCycleState.QUASI_CLOSED,
-        LifeCycleEvent.QUASI_CLOSE);
+    containerLifecycleSM.addTransition(OPEN, CLOSING, FINALIZE);
+    containerLifecycleSM.addTransition(CLOSING, QUASI_CLOSED, QUASI_CLOSE);
+    containerLifecycleSM.addTransition(CLOSING, CLOSED, CLOSE);
+    containerLifecycleSM.addTransition(QUASI_CLOSED, CLOSED, FORCE_CLOSE);
+    containerLifecycleSM.addTransition(CLOSED, DELETING, DELETE);
+    containerLifecycleSM.addTransition(DELETING, DELETED, CLEANUP);
 
-    containerLifecycleSM.addTransition(LifeCycleState.CLOSING,
-        LifeCycleState.CLOSED,
-        LifeCycleEvent.CLOSE);
-
-    containerLifecycleSM.addTransition(LifeCycleState.QUASI_CLOSED,
-        LifeCycleState.CLOSED,
-        LifeCycleEvent.FORCE_CLOSE);
-
-    containerLifecycleSM.addTransition(LifeCycleState.CLOSED,
-        LifeCycleState.DELETING,
-        LifeCycleEvent.DELETE);
-
-    containerLifecycleSM.addTransition(LifeCycleState.DELETING,
-        LifeCycleState.DELETED,
-        LifeCycleEvent.CLEANUP);
+    /* The following set of transitions is to make the state machine
+     * transitions idempotent.
+     */
+    makeStateTransitionIdempotent(containerLifecycleSM, FINALIZE,
+        CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, QUASI_CLOSE,
+        QUASI_CLOSED, CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, CLOSE,
+        CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, FORCE_CLOSE,
+        CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, DELETE,
+        DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, CLEANUP, DELETED);
 
     return containerLifecycleSM;
   }
 
+  private void makeStateTransitionIdempotent(
+      final StateMachine<LifeCycleState, LifeCycleEvent> sm,
+      final LifeCycleEvent event, final LifeCycleState... states) {
+    for (LifeCycleState state : states) {
+      sm.addTransition(state, state, event);
+    }
+  }
+
   /**
    * Returns the configured container size.
    *
@@ -197,29 +211,27 @@ public final class ContainerStateManagerImpl
       final ContainerInfo container = iterator.next().getValue();
       Preconditions.checkNotNull(container);
       containers.addContainer(container);
-      nextContainerID.set(Long.max(container.containerID().getId(),
-          nextContainerID.get()));
       if (container.getState() == LifeCycleState.OPEN) {
         try {
           pipelineManager.addContainerToPipeline(container.getPipelineID(),
-              ContainerID.valueof(container.getContainerID()));
+              container.containerID());
         } catch (PipelineNotFoundException ex) {
           LOG.warn("Found container {} which is in OPEN state with " +
                   "pipeline {} that does not exist. Marking container for " +
                   "closing.", container, container.getPipelineID());
-          updateContainerState(container.containerID(),
-              LifeCycleEvent.FINALIZE);
+          try {
+            updateContainerState(container.containerID().getProtobuf(),
+                LifeCycleEvent.FINALIZE);
+          } catch (InvalidStateTransitionException e) {
+            // This cannot happen.
+            LOG.warn("Unable to finalize Container {}.", container);
+          }
         }
       }
     }
   }
 
   @Override
-  public ContainerID getNextContainerID() {
-    return ContainerID.valueof(nextContainerID.get());
-  }
-
-  @Override
   public Set<ContainerID> getContainerIDs() {
     return containers.getAllContainerIDs();
   }
@@ -230,15 +242,9 @@ public final class ContainerStateManagerImpl
   }
 
   @Override
-  public ContainerInfo getContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    return containers.getContainerInfo(containerID);
-  }
-
-  @Override
-  public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
-    return containers.getContainerReplicas(containerID);
+  public ContainerInfo getContainer(final HddsProtos.ContainerID id) {
+    return containers.getContainerInfo(
+        ContainerID.getFromProtobuf(id));
   }
 
   @Override
@@ -254,32 +260,63 @@ public final class ContainerStateManagerImpl
     final ContainerID containerID = container.containerID();
     final PipelineID pipelineID = container.getPipelineID();
 
-    /*
-     * TODO:
-     *  Check if the container already exist in in ContainerStateManager.
-     *  This optimization can be done after moving ContainerNotFoundException
-     *  from ContainerStateMap to ContainerManagerImpl.
-     */
+    if (!containers.contains(containerID)) {
+      containerStore.put(containerID, container);
+      try {
+        containers.addContainer(container);
+        pipelineManager.addContainerToPipeline(pipelineID, containerID);
+      } catch (Exception ex) {
+        containers.removeContainer(containerID);
+        containerStore.delete(containerID);
+        throw ex;
+      }
+    }
+  }
 
-    containerStore.put(containerID, container);
-    containers.addContainer(container);
-    pipelineManager.addContainerToPipeline(pipelineID, containerID);
-    nextContainerID.incrementAndGet();
+  @Override
+  public boolean contains(final HddsProtos.ContainerID id) {
+    // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+    return containers.contains(ContainerID.getFromProtobuf(id));
   }
 
-  void updateContainerState(final ContainerID containerID,
-                            final LifeCycleEvent event)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+  public void updateContainerState(final HddsProtos.ContainerID containerID,
+                                   final LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException {
+    // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+    final ContainerID id = ContainerID.getFromProtobuf(containerID);
+    if (containers.contains(id)) {
+      final ContainerInfo info = containers.getContainerInfo(id);
+      final LifeCycleState oldState = info.getState();
+      final LifeCycleState newState = stateMachine.getNextState(
+          info.getState(), event);
+      if (newState.getNumber() > oldState.getNumber()) {
+        containers.updateState(id, info.getState(), newState);
+      }
+    }
   }
 
 
-  void updateContainerReplica(final ContainerID containerID,
-                              final ContainerReplica replica)
-      throws ContainerNotFoundException {
-    containers.updateContainerReplica(containerID, replica);
+  @Override
+  public Set<ContainerReplica> getContainerReplicas(
+      final HddsProtos.ContainerID id) {
+    return containers.getContainerReplicas(
+        ContainerID.getFromProtobuf(id));
   }
 
+  @Override
+  public void updateContainerReplica(final HddsProtos.ContainerID id,
+                                     final ContainerReplica replica) {
+    containers.updateContainerReplica(ContainerID.getFromProtobuf(id),
+        replica);
+  }
+
+  @Override
+  public void removeContainerReplica(final HddsProtos.ContainerID id,
+                                     final ContainerReplica replica) {
+    containers.removeContainerReplica(ContainerID.getFromProtobuf(id),
+        replica);
+
+  }
 
   void updateDeleteTransactionId(
       final Map<ContainerID, Long> deleteTransactionMap) {
@@ -291,23 +328,14 @@ public final class ContainerStateManagerImpl
     throw new UnsupportedOperationException("Not yet implemented!");
   }
 
-
   NavigableSet<ContainerID> getMatchingContainerIDs(final String owner,
       final ReplicationType type, final ReplicationFactor factor,
       final LifeCycleState state) {
     throw new UnsupportedOperationException("Not yet implemented!");
   }
 
-  void removeContainerReplica(final ContainerID containerID,
-                              final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
-  }
-
-
-  void removeContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    throw new UnsupportedOperationException("Not yet implemented!");
+  public void removeContainer(final HddsProtos.ContainerID id) {
+    containers.removeContainer(ContainerID.getFromProtobuf(id));
   }
 
   @Override
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
index 3520b01..3a0cf21 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
 import java.io.IOException;
 import java.util.Set;
 
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import 
org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 
 /**
  * A ContainerStateManager is responsible for keeping track of all the
@@ -94,10 +96,9 @@ public interface ContainerStateManagerV2 {
    ************************************************************************/
 
   /**
-   * Returns a new container ID which can be used for allocating a new
-   * container.
+   * Checks if a container with the given ID exists.
    */
-  ContainerID getNextContainerID();
+  boolean contains(HddsProtos.ContainerID containerID);
 
   /**
    * Returns the ID of all the managed containers.
@@ -114,14 +115,24 @@ public interface ContainerStateManagerV2 {
   /**
    *
    */
-  ContainerInfo getContainer(ContainerID containerID)
-      throws ContainerNotFoundException;
+  ContainerInfo getContainer(HddsProtos.ContainerID id);
+
+  /**
+   * Returns the replicas of the container with the given ID.
+   */
+  Set<ContainerReplica> getContainerReplicas(HddsProtos.ContainerID id);
 
   /**
    *
    */
-  Set<ContainerReplica> getContainerReplicas(ContainerID containerID)
-      throws ContainerNotFoundException;
+  void updateContainerReplica(HddsProtos.ContainerID id,
+                              ContainerReplica replica);
+
+  /**
+   * Adds or updates the given replica of the specified container.
+   */
+  void removeContainerReplica(HddsProtos.ContainerID id,
+                              ContainerReplica replica);
 
   /**
    *
@@ -133,5 +144,20 @@ public interface ContainerStateManagerV2 {
   /**
    *
    */
+  @Replicate
+  void updateContainerState(HddsProtos.ContainerID id,
+                            HddsProtos.LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException;
+
+  /**
+   * Removes the container with the given ID.
+   */
+  @Replicate
+  void removeContainer(HddsProtos.ContainerID containerInfo)
+      throws IOException;
+
+  /**
+   * Closes this ContainerStateManager and releases its resources.
+   */
   void close() throws Exception;
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index ed87565..3317f42 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -71,7 +71,7 @@ public class IncrementalContainerReportHandler extends
     for (ContainerReplicaProto replicaProto :
         report.getReport().getReportList()) {
       try {
-        final ContainerID id = ContainerID.valueof(
+        final ContainerID id = ContainerID.valueOf(
             replicaProto.getContainerID());
         if (!replicaProto.getState().equals(
             ContainerReplicaProto.State.DELETED)) {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 2117e70..f59e401 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -130,7 +130,7 @@ public class SCMContainerManager implements 
ContainerManager {
       try {
         if (container.getState() == LifeCycleState.OPEN) {
           pipelineManager.addContainerToPipeline(container.getPipelineID(),
-              ContainerID.valueof(container.getContainerID()));
+              ContainerID.valueOf(container.getContainerID()));
         }
       } catch (PipelineNotFoundException ex) {
         LOG.warn("Found a Container {} which is in {} state with pipeline {} " 
+
@@ -216,7 +216,9 @@ public class SCMContainerManager implements 
ContainerManager {
   public boolean exists(ContainerID containerID) {
     lock.lock();
     try {
-      return (containerStateManager.getContainer(containerID) != null);
+      Preconditions.checkNotNull(
+          containerStateManager.getContainer(containerID));
+      return true;
     } catch (ContainerNotFoundException e) {
       return false;
     } finally {
@@ -290,7 +292,7 @@ public class SCMContainerManager implements 
ContainerManager {
         // PipelineStateManager.
         pipelineManager.removeContainerFromPipeline(
             containerInfo.getPipelineID(),
-            new ContainerID(containerInfo.getContainerID()));
+            containerInfo.containerID());
         throw ex;
       }
       return containerInfo;
@@ -404,7 +406,8 @@ public class SCMContainerManager implements 
ContainerManager {
     try(BatchOperation batchOperation = batchHandler.initBatchOperation()) {
       for (Map.Entry< Long, Long > entry : deleteTransactionMap.entrySet()) {
         long containerID = entry.getKey();
-        ContainerID containerIdObject = new ContainerID(containerID);
+
+        ContainerID containerIdObject = ContainerID.valueOf(containerID);
         ContainerInfo containerInfo =
             containerStore.get(containerIdObject);
         ContainerInfo containerInfoInMem = containerStateManager
@@ -493,7 +496,7 @@ public class SCMContainerManager implements 
ContainerManager {
       throws IOException {
     try {
       containerStore
-          .put(new ContainerID(containerInfo.getContainerID()), containerInfo);
+          .put(containerInfo.containerID(), containerInfo);
       // Incrementing here, as allocateBlock to create a container calls
       // getMatchingContainer() and finally calls this API to add newly
       // created container to DB.
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
index af44a8a..61cff09 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
@@ -153,7 +153,7 @@ public class ContainerAttribute<T> {
    * @return true or false
    */
   public boolean hasContainerID(T key, int id) {
-    return hasContainerID(key, ContainerID.valueof(id));
+    return hasContainerID(key, ContainerID.valueOf(id));
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index d71049b..4d143e0 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,32 +18,30 @@
 
 package org.apache.hadoop.hdds.scm.container.states;
 
+import java.util.Set;
+import java.util.Collections;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.ConcurrentHashMap;
+
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Set;
-import java.util.Collections;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.ConcurrentHashMap;
 
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .CONTAINER_EXISTS;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .FAILED_TO_CHANGE_CONTAINER_STATE;
 
 /**
@@ -76,6 +74,8 @@ import static 
org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
  * select a container that belongs to user1, with Ratis replication which can
  * make 3 copies of data. The fact that we will look for open containers by
  * default and if we cannot find them we will add new containers.
+ *
+ * All the calls are idempotent.
  */
 public class ContainerStateMap {
   private static final Logger LOG =
@@ -95,6 +95,7 @@ public class ContainerStateMap {
   // Container State Map lock should be held before calling into
   // Update ContainerAttributes. The consistency of ContainerAttributes is
   // protected by this lock.
+  // TODO: Evaluate whether this lock can be removed.
   private final ReadWriteLock lock;
 
   /**
@@ -120,56 +121,57 @@ public class ContainerStateMap {
   public void addContainer(final ContainerInfo info)
       throws SCMException {
     Preconditions.checkNotNull(info, "Container Info cannot be null");
-    Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
-        "ExpectedReplicaCount should be greater than 0");
-
     lock.writeLock().lock();
     try {
       final ContainerID id = info.containerID();
-      if (containerMap.putIfAbsent(id, info) != null) {
-        LOG.debug("Duplicate container ID detected. {}", id);
-        throw new
-            SCMException("Duplicate container ID detected.",
-            CONTAINER_EXISTS);
+      if (!contains(id)) {
+        containerMap.put(id, info);
+        lifeCycleStateMap.insert(info.getState(), id);
+        ownerMap.insert(info.getOwner(), id);
+        factorMap.insert(info.getReplicationFactor(), id);
+        typeMap.insert(info.getReplicationType(), id);
+        replicaMap.put(id, ConcurrentHashMap.newKeySet());
+
+        // Flush the cache of this container type, will be added later when
+        // get container queries are executed.
+        flushCache(info);
+        LOG.trace("Container {} added to ContainerStateMap.", id);
       }
-
-      lifeCycleStateMap.insert(info.getState(), id);
-      ownerMap.insert(info.getOwner(), id);
-      factorMap.insert(info.getReplicationFactor(), id);
-      typeMap.insert(info.getReplicationType(), id);
-      replicaMap.put(id, ConcurrentHashMap.newKeySet());
-
-      // Flush the cache of this container type, will be added later when
-      // get container queries are executed.
-      flushCache(info);
-      LOG.trace("Created container with {} successfully.", id);
     } finally {
       lock.writeLock().unlock();
     }
   }
 
+  public boolean contains(final ContainerID id) {
+    lock.readLock().lock();
+    try {
+      return containerMap.containsKey(id);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
   /**
    * Removes a Container Entry from ContainerStateMap.
    *
-   * @param containerID - ContainerID
-   * @throws SCMException - throws if create failed.
+   * @param id - ContainerID
    */
-  public void removeContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    Preconditions.checkNotNull(containerID, "ContainerID cannot be null");
+  public void removeContainer(final ContainerID id) {
+    Preconditions.checkNotNull(id, "ContainerID cannot be null");
     lock.writeLock().lock();
     try {
-      checkIfContainerExist(containerID);
-      // Should we revert back to the original state if any of the below
-      // remove operation fails?
-      final ContainerInfo info = containerMap.remove(containerID);
-      lifeCycleStateMap.remove(info.getState(), containerID);
-      ownerMap.remove(info.getOwner(), containerID);
-      factorMap.remove(info.getReplicationFactor(), containerID);
-      typeMap.remove(info.getReplicationType(), containerID);
-      // Flush the cache of this container type.
-      flushCache(info);
-      LOG.trace("Removed container with {} successfully.", containerID);
+      if (contains(id)) {
+        // Should we revert back to the original state if any of the below
+        // remove operation fails?
+        final ContainerInfo info = containerMap.remove(id);
+        lifeCycleStateMap.remove(info.getState(), id);
+        ownerMap.remove(info.getOwner(), id);
+        factorMap.remove(info.getReplicationFactor(), id);
+        typeMap.remove(info.getReplicationType(), id);
+        // Flush the cache of this container type.
+        flushCache(info);
+        LOG.trace("Container {} removed from ContainerStateMap.", id);
+      }
     } finally {
       lock.writeLock().unlock();
     }
@@ -179,13 +181,11 @@ public class ContainerStateMap {
    * Returns the latest state of Container from SCM's Container State Map.
    *
    * @param containerID - ContainerID
-   * @return container info, if found.
+   * @return container info if found, else null.
    */
-  public ContainerInfo getContainerInfo(final ContainerID containerID)
-      throws ContainerNotFoundException {
+  public ContainerInfo getContainerInfo(final ContainerID containerID) {
     lock.readLock().lock();
     try {
-      checkIfContainerExist(containerID);
       return containerMap.get(containerID);
     } finally {
       lock.readLock().unlock();
@@ -194,19 +194,18 @@ public class ContainerStateMap {
 
   /**
    * Returns the latest list of DataNodes where replica for given containerId
-   * exist. Throws an SCMException if no entry is found for given containerId.
+   * exist.
    *
    * @param containerID
    * @return Set<DatanodeDetails>
    */
   public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
+      final ContainerID containerID) {
     Preconditions.checkNotNull(containerID);
     lock.readLock().lock();
     try {
-      checkIfContainerExist(containerID);
-      return Collections
-          .unmodifiableSet(replicaMap.get(containerID));
+      final Set<ContainerReplica> replicas = replicaMap.get(containerID);
+      return replicas == null ? null : Collections.unmodifiableSet(replicas);
     } finally {
       lock.readLock().unlock();
     }
@@ -221,14 +220,15 @@ public class ContainerStateMap {
    * @param replica
    */
   public void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) throws ContainerNotFoundException {
+      final ContainerReplica replica) {
     Preconditions.checkNotNull(containerID);
     lock.writeLock().lock();
     try {
-      checkIfContainerExist(containerID);
-      Set<ContainerReplica> replicas = replicaMap.get(containerID);
-      replicas.remove(replica);
-      replicas.add(replica);
+      if (contains(containerID)) {
+        final Set<ContainerReplica> replicas = replicaMap.get(containerID);
+        replicas.remove(replica);
+        replicas.add(replica);
+      }
     } finally {
       lock.writeLock().unlock();
     }
@@ -242,18 +242,13 @@ public class ContainerStateMap {
    * @return True of dataNode is removed successfully else false.
    */
   public void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
+      final ContainerReplica replica) {
     Preconditions.checkNotNull(containerID);
     Preconditions.checkNotNull(replica);
-
     lock.writeLock().lock();
     try {
-      checkIfContainerExist(containerID);
-      if(!replicaMap.get(containerID).remove(replica)) {
-        throw new ContainerReplicaNotFoundException(
-            "Container #"
-                + containerID.getId() + ", replica: " + replica);
+      if (contains(containerID)) {
+        replicaMap.get(containerID).remove(replica);
       }
     } finally {
       lock.writeLock().unlock();
@@ -264,15 +259,16 @@ public class ContainerStateMap {
    * Just update the container State.
    * @param info ContainerInfo.
    */
-  public void updateContainerInfo(final ContainerInfo info)
-      throws ContainerNotFoundException {
+  public void updateContainerInfo(final ContainerInfo info) {
+    Preconditions.checkNotNull(info);
+    final ContainerID id = info.containerID();
     lock.writeLock().lock();
     try {
-      Preconditions.checkNotNull(info);
-      checkIfContainerExist(info.containerID());
-      final ContainerInfo currentInfo = containerMap.get(info.containerID());
-      flushCache(info, currentInfo);
-      containerMap.put(info.containerID(), info);
+      if (contains(id)) {
+        final ContainerInfo currentInfo = containerMap.get(id);
+        flushCache(info, currentInfo);
+        containerMap.put(id, info);
+      }
     } finally {
       lock.writeLock().unlock();
     }
@@ -287,12 +283,16 @@ public class ContainerStateMap {
    * @throws SCMException - in case of failure.
    */
   public void updateState(ContainerID containerID, LifeCycleState currentState,
-      LifeCycleState newState) throws SCMException, ContainerNotFoundException {
+      LifeCycleState newState) throws SCMException {
     Preconditions.checkNotNull(currentState);
     Preconditions.checkNotNull(newState);
     lock.writeLock().lock();
     try {
-      checkIfContainerExist(containerID);
+      if (!contains(containerID)) {
+        return;
+      }
+
+      // TODO: Simplify this logic.
       final ContainerInfo currentInfo = containerMap.get(containerID);
       try {
         currentInfo.setState(newState);
@@ -340,7 +340,12 @@ public class ContainerStateMap {
   }
 
   public Set<ContainerID> getAllContainerIDs() {
-    return Collections.unmodifiableSet(containerMap.keySet());
+    lock.readLock().lock();
+    try {
+      return Collections.unmodifiableSet(containerMap.keySet());
+    } finally {
+      lock.readLock().unlock();
+    }
   }
 
   /**
@@ -535,13 +540,4 @@ public class ContainerStateMap {
     }
   }
 
-  // TODO: Move container not found exception to upper layer.
-  private void checkIfContainerExist(ContainerID containerID)
-      throws ContainerNotFoundException {
-    if (!containerMap.containsKey(containerID)) {
-      throw new ContainerNotFoundException("Container with id #" +
-          containerID.getId() + " not found.");
-    }
-  }
-
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
index 87c9e91..cb02e31 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
@@ -38,11 +38,11 @@ public class ContainerIDCodec implements Codec<ContainerID> {
 
   @Override
   public ContainerID fromPersistedFormat(byte[] rawData) throws IOException {
-    return new ContainerID(longCodec.fromPersistedFormat(rawData));
+    return ContainerID.valueOf(longCodec.fromPersistedFormat(rawData));
   }
 
   @Override
   public ContainerID copyObject(ContainerID object) {
-    return new ContainerID(object.getId());
+    return ContainerID.valueOf(object.getId());
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index ede679d..594527a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -203,7 +203,7 @@ public class SCMClientProtocolServer implements
     getScm().checkAdminAccess(remoteUser);
     try {
       return scm.getContainerManager()
-          .getContainer(ContainerID.valueof(containerID));
+          .getContainer(ContainerID.valueOf(containerID));
     } catch (IOException ex) {
       auditSuccess = false;
       AUDIT.logReadFailure(
@@ -222,7 +222,7 @@ public class SCMClientProtocolServer implements
 
   private ContainerWithPipeline getContainerWithPipelineCommon(
       long containerID) throws IOException {
-    final ContainerID cid = ContainerID.valueof(containerID);
+    final ContainerID cid = ContainerID.valueOf(containerID);
     final ContainerInfo container = scm.getContainerManager()
         .getContainer(cid);
 
@@ -268,13 +268,13 @@ public class SCMClientProtocolServer implements
       AUDIT.logReadSuccess(buildAuditMessageForSuccess(
           SCMAction.GET_CONTAINER_WITH_PIPELINE,
           Collections.singletonMap("containerID",
-          ContainerID.valueof(containerID).toString())));
+          ContainerID.valueOf(containerID).toString())));
       return cp;
     } catch (IOException ex) {
       AUDIT.logReadFailure(buildAuditMessageForFailure(
           SCMAction.GET_CONTAINER_WITH_PIPELINE,
           Collections.singletonMap("containerID",
-              ContainerID.valueof(containerID).toString()), ex));
+              ContainerID.valueOf(containerID).toString()), ex));
       throw ex;
     }
   }
@@ -291,13 +291,13 @@ public class SCMClientProtocolServer implements
       try {
         ContainerWithPipeline cp = getContainerWithPipelineCommon(containerID);
         cpList.add(cp);
-        strContainerIDs.append(ContainerID.valueof(containerID).toString());
+        strContainerIDs.append(ContainerID.valueOf(containerID).toString());
         strContainerIDs.append(",");
       } catch (IOException ex) {
         AUDIT.logReadFailure(buildAuditMessageForFailure(
             SCMAction.GET_CONTAINER_WITH_PIPELINE_BATCH,
             Collections.singletonMap("containerID",
-                ContainerID.valueof(containerID).toString()), ex));
+                ContainerID.valueOf(containerID).toString()), ex));
         throw ex;
       }
     }
@@ -337,7 +337,7 @@ public class SCMClientProtocolServer implements
       // "null" is assigned, so that its handled in the
       // scm.getContainerManager().listContainer method
       final ContainerID containerId = startContainerID != 0 ? ContainerID
-          .valueof(startContainerID) : null;
+          .valueOf(startContainerID) : null;
       return scm.getContainerManager().
           listContainer(containerId, count);
     } catch (Exception ex) {
@@ -364,7 +364,7 @@ public class SCMClientProtocolServer implements
     try {
       getScm().checkAdminAccess(remoteUser);
       scm.getContainerManager().deleteContainer(
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
     } catch (Exception ex) {
       auditSuccess = false;
       AUDIT.logWriteFailure(
@@ -407,7 +407,7 @@ public class SCMClientProtocolServer implements
     auditMap.put("remoteUser", remoteUser);
     try {
       scm.checkAdminAccess(remoteUser);
-      final ContainerID cid = ContainerID.valueof(containerID);
+      final ContainerID cid = ContainerID.valueOf(containerID);
       final HddsProtos.LifeCycleState state = scm.getContainerManager()
           .getContainer(cid).getState();
       if (!state.equals(HddsProtos.LifeCycleState.OPEN)) {
@@ -415,7 +415,7 @@ public class SCMClientProtocolServer implements
             ResultCodes.UNEXPECTED_CONTAINER_STATE);
       }
       scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER,
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
           SCMAction.CLOSE_CONTAINER, auditMap));
     } catch (Exception ex) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index b17729b..4513857 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -774,7 +774,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   @VisibleForTesting
   public ContainerInfo getContainerInfo(long containerID) throws
       IOException {
-    return containerManager.getContainer(ContainerID.valueof(containerID));
+    return containerManager.getContainer(ContainerID.valueOf(containerID));
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index d4e2553..96147c5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -410,13 +410,14 @@ public class TestDeletedBlockLog {
             .build();
 
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipelineID(pipeline.getId())
+    builder.setContainerID(containerID)
+        .setPipelineID(pipeline.getId())
         .setReplicationType(pipeline.getType())
         .setReplicationFactor(pipeline.getFactor());
 
     ContainerInfo containerInfo = builder.build();
     Mockito.doReturn(containerInfo).when(containerManager)
-        .getContainer(ContainerID.valueof(containerID));
+        .getContainer(ContainerID.valueOf(containerID));
 
     final Set<ContainerReplica> replicaSet = dns.stream()
         .map(datanodeDetails -> ContainerReplica.newBuilder()
@@ -426,7 +427,7 @@ public class TestDeletedBlockLog {
             .build())
         .collect(Collectors.toSet());
     when(containerManager.getContainerReplicas(
-        ContainerID.valueof(containerID)))
+        ContainerID.valueOf(containerID)))
         .thenReturn(replicaSet);
   }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index daa9726..fbe4d42 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -126,7 +126,7 @@ public class TestCloseContainerEventHandler {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(Math.abs(RandomUtils.nextInt())));
+        ContainerID.valueOf(Math.abs(RandomUtils.nextInt())));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Close container Event triggered for container"));
@@ -138,7 +138,7 @@ public class TestCloseContainerEventHandler {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(id));
+        ContainerID.valueOf(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Failed to close the container"));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
index 3434825..09b51f0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -61,7 +61,7 @@ public class TestContainerActionsHandler {
     queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
     queue.processAll(1000L);
     verify(closeContainerEventHandler, times(1))
-        .onMessage(ContainerID.valueof(1L), queue);
+        .onMessage(ContainerID.valueOf(1L), queue);
 
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index 022d392..6492e0a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -79,13 +79,37 @@ public class TestContainerManagerImpl {
 
   @Test
   public void testAllocateContainer() throws Exception {
-    Assert.assertTrue(containerManager.getContainerIDs().isEmpty());
+    Assert.assertTrue(
+        containerManager.listContainers(null, Integer.MAX_VALUE).isEmpty());
     final ContainerInfo container = containerManager.allocateContainer(
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE, "admin");
-    Assert.assertEquals(1, containerManager.getContainerIDs().size());
+    Assert.assertEquals(1,
+        containerManager.listContainers(null, Integer.MAX_VALUE).size());
     Assert.assertNotNull(containerManager.getContainer(
         container.containerID()));
   }
 
-}
\ No newline at end of file
+  @Test
+  public void testUpdateContainerState() throws Exception {
+    final ContainerInfo container = containerManager.allocateContainer(
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, "admin");
+    final ContainerID cid = container.containerID();
+    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.QUASI_CLOSE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.QUASI_CLOSED,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.FORCE_CLOSE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
+        containerManager.getContainer(cid).getState());
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 2565076..a45d637 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -287,7 +287,7 @@ public class TestSCMContainerManager {
   @Test
   public void testgetNoneExistentContainer() {
     try {
-      containerManager.getContainer(ContainerID.valueof(
+      containerManager.getContainer(ContainerID.valueOf(
           random.nextInt() & Integer.MAX_VALUE));
       Assert.fail();
     } catch (ContainerNotFoundException ex) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
index 63cc9bf..b7b8988 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
@@ -39,7 +39,7 @@ public class TestContainerAttribute {
   @Test
   public void testInsert() throws SCMException {
     ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
+    ContainerID id = ContainerID.valueOf(42);
     containerAttribute.insert(1, id);
     Assert.assertEquals(1,
         containerAttribute.getCollection(1).size());
@@ -47,7 +47,7 @@ public class TestContainerAttribute {
 
     // Insert again and verify that it overwrites an existing value.
     ContainerID newId =
-        new ContainerID(42);
+        ContainerID.valueOf(42);
     containerAttribute.insert(1, newId);
     Assert.assertEquals(1,
         containerAttribute.getCollection(1).size());
@@ -59,7 +59,7 @@ public class TestContainerAttribute {
     ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
 
     for (int x = 1; x < 42; x++) {
-      containerAttribute.insert(1, new ContainerID(x));
+      containerAttribute.insert(1, ContainerID.valueOf(x));
     }
     Assert.assertTrue(containerAttribute.hasKey(1));
     for (int x = 1; x < 42; x++) {
@@ -67,7 +67,7 @@ public class TestContainerAttribute {
     }
 
     Assert.assertFalse(containerAttribute.hasContainerID(1,
-        new ContainerID(42)));
+        ContainerID.valueOf(42)));
   }
 
   @Test
@@ -76,7 +76,7 @@ public class TestContainerAttribute {
     ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
     for (String k : keyslist) {
       for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
+        containerAttribute.insert(k, ContainerID.valueOf(x));
       }
     }
     for (String k : keyslist) {
@@ -96,16 +96,16 @@ public class TestContainerAttribute {
 
     for (String k : keyslist) {
       for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
+        containerAttribute.insert(k, ContainerID.valueOf(x));
       }
     }
     for (int x = 1; x < 101; x += 2) {
-      containerAttribute.remove("Key1", new ContainerID(x));
+      containerAttribute.remove("Key1", ContainerID.valueOf(x));
     }
 
     for (int x = 1; x < 101; x += 2) {
       Assert.assertFalse(containerAttribute.hasContainerID("Key1",
-          new ContainerID(x)));
+          ContainerID.valueOf(x)));
     }
 
     Assert.assertEquals(100,
@@ -125,7 +125,7 @@ public class TestContainerAttribute {
     String key3 = "Key3";
 
     ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
+    ContainerID id = ContainerID.valueOf(42);
 
     containerAttribute.insert(key1, id);
     Assert.assertTrue(containerAttribute.hasContainerID(key1, id));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 50b962d..3d77e9d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -193,19 +193,19 @@ public class TestDeadNodeHandler {
     deadNodeHandler.onMessage(datanode1, publisher);
 
     Set<ContainerReplica> container1Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container1.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
     Assert.assertEquals(1, container1Replicas.size());
     Assert.assertEquals(datanode2,
         container1Replicas.iterator().next().getDatanodeDetails());
 
     Set<ContainerReplica> container2Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container2.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container2.getContainerID()));
     Assert.assertEquals(1, container2Replicas.size());
     Assert.assertEquals(datanode2,
         container2Replicas.iterator().next().getDatanodeDetails());
 
     Set<ContainerReplica> container3Replicas = containerManager
-            .getContainerReplicas(new ContainerID(container3.getContainerID()));
+            .getContainerReplicas(container3.containerID());
     Assert.assertEquals(1, container3Replicas.size());
     Assert.assertEquals(datanode3,
         container3Replicas.iterator().next().getDatanodeDetails());
@@ -216,7 +216,7 @@ public class TestDeadNodeHandler {
       throws ContainerNotFoundException {
     for (DatanodeDetails datanode : datanodes) {
       contManager.updateContainerReplica(
-          new ContainerID(container.getContainerID()),
+          ContainerID.valueOf(container.getContainerID()),
           ContainerReplica.newBuilder()
               .setContainerState(ContainerReplicaProto.State.OPEN)
               .setContainerID(container.containerID())
@@ -236,7 +236,7 @@ public class TestDeadNodeHandler {
     nodeManager
         .setContainers(datanode,
             Arrays.stream(containers)
-                .map(container -> new ContainerID(container.getContainerID()))
+                .map(ContainerInfo::containerID)
                 .collect(Collectors.toSet()));
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index 77ed907..bc1b3dd 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -52,7 +52,7 @@ public class TestNode2ContainerMap {
       TreeSet<ContainerID> currentSet = new TreeSet<>();
       for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
         long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
-        currentSet.add(new ContainerID(currentCnIndex));
+        currentSet.add(ContainerID.valueOf(currentCnIndex));
       }
       testData.put(UUID.randomUUID(), currentSet);
     }
@@ -206,7 +206,7 @@ public class TestNode2ContainerMap {
     TreeSet<ContainerID> addedContainers = new TreeSet<>();
     for (int x = 1; x <= newCount; x++) {
       long cTemp = last.getId() + x;
-      addedContainers.add(new ContainerID(cTemp));
+      addedContainers.add(ContainerID.valueOf(cTemp));
     }
 
     // This set is the super set of existing containers and new containers.
@@ -250,7 +250,7 @@ public class TestNode2ContainerMap {
     for (int x = 0; x < removeCount; x++) {
       int startBase = (int) first.getId();
       long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
+      removedContainers.add(ContainerID.valueOf(cTemp + startBase));
     }
 
     // This set is a new set with some containers removed.
@@ -282,7 +282,7 @@ public class TestNode2ContainerMap {
     Set<ContainerID> insertedSet = new TreeSet<>();
     // Insert nodes from 1..30
     for (int x = 1; x <= 30; x++) {
-      insertedSet.add(new ContainerID(x));
+      insertedSet.add(ContainerID.valueOf(x));
     }
 
 
@@ -296,7 +296,7 @@ public class TestNode2ContainerMap {
     for (int x = 0; x < removeCount; x++) {
       int startBase = (int) first.getId();
       long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
+      removedContainers.add(ContainerID.valueOf(cTemp + startBase));
     }
 
     Set<ContainerID> newSet = new TreeSet<>(values);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index a8f03bb..642378f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -162,7 +162,7 @@ public class TestPipelineManagerImpl {
     PipelineID pipelineID = pipeline.getId();
 
     pipelineManager.openPipeline(pipelineID);
-    pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueof(1));
+    pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueOf(1));
     Assert.assertTrue(pipelineManager
         .getPipelines(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE,
@@ -262,7 +262,7 @@ public class TestPipelineManagerImpl {
     // Open the pipeline
     pipelineManager.openPipeline(pipeline.getId());
     pipelineManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
     Assert.assertTrue(pipelineManager
         .getPipelines(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
index 8252e2c..43d5398 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
@@ -290,14 +290,14 @@ public class TestPipelineStateManager {
     stateManager.addPipeline(pipeline);
     pipeline = stateManager.getPipeline(pipeline.getId());
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
 
     // move pipeline to open state
     stateManager.openPipeline(pipeline.getId());
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
 
     //verify the number of containers returned
     Set<ContainerID> containerIDs =
@@ -307,7 +307,7 @@ public class TestPipelineStateManager {
     removePipeline(pipeline);
     try {
       stateManager.addContainerToPipeline(pipeline.getId(),
-          ContainerID.valueof(++containerID));
+          ContainerID.valueOf(++containerID));
       Assert.fail("Container should not have been added");
     } catch (IOException e) {
       // Can not add a container to removed pipeline
@@ -322,7 +322,7 @@ public class TestPipelineStateManager {
     // close the pipeline
     stateManager.openPipeline(pipeline.getId());
     stateManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
 
     try {
       stateManager.removePipeline(pipeline.getId());
@@ -347,26 +347,26 @@ public class TestPipelineStateManager {
     stateManager.openPipeline(pipeline.getId());
 
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size());
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // add two containers in the pipeline
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size());
 
     // move pipeline to closing state
     stateManager.finalizePipeline(pipeline.getId());
 
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(--containerID));
+        ContainerID.valueOf(--containerID));
     Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // clean up
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 7f53736..9cc9b3e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -183,7 +183,7 @@ public class TestSCMPipelineManager {
             HddsProtos.ReplicationFactor.THREE);
     pipelineManager.openPipeline(pipeline.getId());
     pipelineManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
     pipelineManager.closePipeline(pipeline, false);
     pipelineManager.close();
 
@@ -428,7 +428,7 @@ public class TestSCMPipelineManager {
     final PipelineID pid = pipeline.getId();
 
     pipelineManager.openPipeline(pid);
-    pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1));
+    pipelineManager.addContainerToPipeline(pid, ContainerID.valueOf(1));
 
     Assert.assertTrue(pipelineManager
         .getPipelines(HddsProtos.ReplicationType.RATIS,
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 03cdb72..adffbd8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -324,7 +324,7 @@ public class KeyOutputStream extends OutputStream {
     // if the container needs to be excluded , add the container to the
     // exclusion list , otherwise add the pipeline to the exclusion list
     if (containerExclusionException) {
-      excludeList.addConatinerId(ContainerID.valueof(containerId));
+      excludeList.addConatinerId(ContainerID.valueOf(containerId));
     } else {
       excludeList.addPipeline(pipelineId);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 3842818..70f4152 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -252,7 +252,7 @@ public class TestContainerStateManagerIntegration {
     ContainerInfo info = containerManager
         .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(),
-            new HashSet<>(Collections.singletonList(new ContainerID(1))));
+            new HashSet<>(Collections.singletonList(ContainerID.valueOf(1))));
     Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
         info.getContainerID());
   }
@@ -277,8 +277,8 @@ public class TestContainerStateManagerIntegration {
     ContainerInfo info = containerManager
         .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(),
-            new HashSet<>(Arrays.asList(new ContainerID(1), new
-                ContainerID(2), new ContainerID(3))));
+            new HashSet<>(Arrays.asList(ContainerID.valueOf(1),
+                ContainerID.valueOf(2), ContainerID.valueOf(3))));
     Assert.assertEquals(info.getContainerID(), 4);
   }
 
@@ -418,7 +418,7 @@ public class TestContainerStateManagerIntegration {
         .setUuid(UUID.randomUUID()).build();
 
     // Test 1: no replica's exist
-    ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong());
+    ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong());
     Set<ContainerReplica> replicaSet;
     try {
       containerStateManager.getContainerReplicas(containerID);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index cbe84b6..cc6824e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -113,7 +113,7 @@ public class TestSCMContainerManagerMetrics {
         "NumSuccessfulDeleteContainers", metrics);
 
     containerManager.deleteContainer(
-        new ContainerID(containerInfo.getContainerID()));
+        ContainerID.valueOf(containerInfo.getContainerID()));
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
@@ -123,7 +123,7 @@ public class TestSCMContainerManagerMetrics {
     try {
       // Give random container to delete.
       containerManager.deleteContainer(
-          new ContainerID(RandomUtils.nextLong(10000, 20000)));
+          ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)));
       fail("testContainerOpsMetrics failed");
     } catch (IOException ex) {
       // Here it should fail, so it should have the old metric value.
@@ -135,7 +135,7 @@ public class TestSCMContainerManagerMetrics {
     }
 
     containerManager.listContainer(
-        new ContainerID(containerInfo.getContainerID()), 1);
+        ContainerID.valueOf(containerInfo.getContainerID()), 1);
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     Assert.assertEquals(getLongCounter("NumListContainerOps",
         metrics), 1);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index dd543ed..69615e8 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -58,21 +58,21 @@ public final class OzoneTestUtils {
       StorageContainerManager scm) throws Exception {
     performOperationOnKeyContainers((blockID) -> {
       if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .getState() == HddsProtos.LifeCycleState.OPEN) {
         scm.getContainerManager()
-            
.updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+            
.updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
                 HddsProtos.LifeCycleEvent.FINALIZE);
       }
       if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .getState() == HddsProtos.LifeCycleState.CLOSING) {
         scm.getContainerManager()
-            
.updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+            
.updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
                 HddsProtos.LifeCycleEvent.CLOSE);
       }
       Assert.assertFalse(scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .isOpen());
     }, omKeyLocationInfoGroups);
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index d9f7578..9fc8927 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -160,7 +160,7 @@ public class TestContainerReplicationEndToEnd {
     long containerID = omKeyLocationInfo.getContainerID();
     PipelineID pipelineID =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(new ContainerID(containerID)).getPipelineID();
+            .getContainer(ContainerID.valueOf(containerID)).getPipelineID();
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(pipelineID);
@@ -168,13 +168,13 @@ public class TestContainerReplicationEndToEnd {
 
     HddsProtos.LifeCycleState containerState =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(new ContainerID(containerID)).getState();
+            .getContainer(ContainerID.valueOf(containerID)).getState();
     LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info(
         "Current Container State is {}",  containerState);
     if ((containerState != HddsProtos.LifeCycleState.CLOSING) &&
         (containerState != HddsProtos.LifeCycleState.CLOSED)) {
       cluster.getStorageContainerManager().getContainerManager()
-          .updateContainerState(new ContainerID(containerID),
+          .updateContainerState(ContainerID.valueOf(containerID),
               HddsProtos.LifeCycleEvent.FINALIZE);
     }
     // wait for container to move to OPEN state in SCM
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index a9c0706..2de63d5 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -177,7 +177,7 @@ public class TestFailureHandlingByClient {
     long containerId = locationInfoList.get(0).getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
+        .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -217,7 +217,7 @@ public class TestFailureHandlingByClient {
     BlockID blockId = locationInfoList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -280,7 +280,7 @@ public class TestFailureHandlingByClient {
     key.flush();
 
     Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
-        .contains(ContainerID.valueof(containerId)));
+        .contains(ContainerID.valueOf(containerId)));
     Assert.assertTrue(
         keyOutputStream.getExcludeList().getDatanodes().isEmpty());
     Assert.assertTrue(
@@ -328,7 +328,7 @@ public class TestFailureHandlingByClient {
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -391,7 +391,7 @@ public class TestFailureHandlingByClient {
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 76027f7..57158bb 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -183,7 +183,7 @@ public class TestFailureHandlingByClientFlushDelay {
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index b435ce9..2a97dab 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -159,7 +159,7 @@ public class TestMultiBlockWritesWithDnFailures {
     long containerId = locationInfoList.get(1).getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
+        .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -207,7 +207,7 @@ public class TestMultiBlockWritesWithDnFailures {
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
index dd871f3..76861d4 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
@@ -155,7 +155,7 @@ public class TestOzoneClientRetriesOnException {
     Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
     ContainerInfo container =
             cluster.getStorageContainerManager().getContainerManager()
-                    .getContainer(ContainerID.valueof(containerID));
+                    .getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline =
             cluster.getStorageContainerManager().getPipelineManager()
                     .getPipeline(container.getPipelineID());
@@ -201,7 +201,7 @@ public class TestOzoneClientRetriesOnException {
       containerID = entry.getBlockID().getContainerID();
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index e202ca1..a96cbe6 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -147,7 +147,7 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
     Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
+            .getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index b7b75a4..24b8620 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -1542,7 +1542,7 @@ public abstract class TestOzoneRpcClientAbstract {
     // Second, sum the data size from chunks in Container via containerID
     // and localID, make sure the size equals to the size from keyDetails.
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 9148459..5e8e5cc 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -169,7 +169,7 @@ public class TestReadRetries {
         .assertEquals(value.getBytes().length, 
keyLocations.get(0).getLength());
 
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index fab2ea3..21bbc04 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -174,7 +174,7 @@ public final class TestHelper {
     for (long containerID : containerIdList) {
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
@@ -250,7 +250,7 @@ public final class TestHelper {
     for (long containerID : containerIdList) {
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
@@ -271,7 +271,7 @@ public final class TestHelper {
         // send the order to close the container
         cluster.getStorageContainerManager().getEventQueue()
             .fireEvent(SCMEvents.CLOSE_CONTAINER,
-                ContainerID.valueof(containerID));
+                ContainerID.valueOf(containerID));
       }
     }
     int index = 0;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 6b40179..853f2cd 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -122,7 +122,7 @@ public class TestCloseContainerByPipeline {
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -179,7 +179,7 @@ public class TestCloseContainerByPipeline {
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -232,7 +232,7 @@ public class TestCloseContainerByPipeline {
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -295,7 +295,7 @@ public class TestCloseContainerByPipeline {
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 831c729..8bd054b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -107,7 +107,7 @@ public class TestCloseContainerHandler {
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    ContainerID containerId = ContainerID.valueof(
+    ContainerID containerId = ContainerID.valueOf(
         omKeyLocationInfo.getContainerID());
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager().getContainer(containerId);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 28b58d9..61c3369 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -252,7 +252,7 @@ public class TestDeleteContainerHandler {
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    return ContainerID.valueof(
+    return ContainerID.valueOf(
         omKeyLocationInfo.getContainerID());
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
index 631d944..fbdee7e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
@@ -182,7 +182,7 @@ public class TestDataScrubber {
     ContainerManager cm = cluster.getStorageContainerManager()
         .getContainerManager();
     Set<ContainerReplica> replicas = cm.getContainerReplicas(
-        ContainerID.valueof(c.getContainerData().getContainerID()));
+        ContainerID.valueOf(c.getContainerData().getContainerID()));
     Assert.assertEquals(1, replicas.size());
     ContainerReplica r = replicas.iterator().next();
     Assert.assertEquals(StorageContainerDatanodeProtocolProtos.
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 7f049a3..1a4dddc 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -128,7 +128,7 @@ public class TestContainerReportWithKeys {
     ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
     Set<ContainerReplica> replicas =
         scm.getContainerManager().getContainerReplicas(
-            new ContainerID(keyInfo.getContainerID()));
+            ContainerID.valueOf(keyInfo.getContainerID()));
     Assert.assertTrue(replicas.size() == 1);
     replicas.stream().forEach(rp ->
         Assert.assertTrue(rp.getDatanodeDetails().getParent() != null));
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
index 9092cc5..ecb2a46 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
@@ -202,6 +202,6 @@ public class TestReconAsPassiveScm {
 
     LambdaTestUtils.await(90000, 5000,
         () -> (newReconScm.getContainerManager()
-            .exists(ContainerID.valueof(containerID))));
+            .exists(ContainerID.valueOf(containerID))));
   }
 }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 394c102..3afe483 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -159,16 +159,18 @@ public class TestSCMMXBean {
       if (i % 2 == 0) {
         containerID = containerInfoList.get(i).getContainerID();
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
-        assertEquals(scmContainerManager.getContainer(new ContainerID(
+            ContainerID.valueOf(containerID),
+            HddsProtos.LifeCycleEvent.FINALIZE);
+        assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
             containerID)).getState(), HddsProtos.LifeCycleState.CLOSING);
       } else {
         containerID = containerInfoList.get(i).getContainerID();
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
+            ContainerID.valueOf(containerID),
+            HddsProtos.LifeCycleEvent.FINALIZE);
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE);
-        assertEquals(scmContainerManager.getContainer(new ContainerID(
+            ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.CLOSE);
+        assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
             containerID)).getState(), HddsProtos.LifeCycleState.CLOSED);
       }
 
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 1778b84..10522cb 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -246,7 +246,7 @@ public class ContainerEndpoint {
           long containerID = container.getContainerId();
           try {
             ContainerInfo containerInfo =
-                containerManager.getContainer(new ContainerID(containerID));
+                
containerManager.getContainer(ContainerID.valueOf(containerID));
             long keyCount = containerInfo.getNumberOfKeys();
             UUID pipelineID = containerInfo.getPipelineID().getId();
 
@@ -307,7 +307,7 @@ public class ContainerEndpoint {
       for (UnhealthyContainers c : containers) {
         long containerID = c.getContainerId();
         ContainerInfo containerInfo =
-            containerManager.getContainer(new ContainerID(containerID));
+            containerManager.getContainer(ContainerID.valueOf(containerID));
         long keyCount = containerInfo.getNumberOfKeys();
         UUID pipelineID = containerInfo.getPipelineID().getId();
         List<ContainerHistory> datanodes =
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 315dd5c..f005509 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -97,7 +97,7 @@ public class ContainerHealthTask extends ReconScmTask {
   private ContainerHealthStatus setCurrentContainer(long recordId)
       throws ContainerNotFoundException {
     ContainerInfo container =
-        containerManager.getContainer(new ContainerID(recordId));
+        containerManager.getContainer(ContainerID.valueOf(recordId));
     Set<ContainerReplica> replicas =
         containerManager.getContainerReplicas(container.containerID());
     return new ContainerHealthStatus(container, replicas, placementPolicy);
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index dff4709..c32ce05 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -162,7 +162,7 @@ public class ReconContainerManager extends 
SCMContainerManager {
           containerInfo.containerID(), ex);
       getPipelineManager().removeContainerFromPipeline(
           containerInfo.getPipelineID(),
-          new ContainerID(containerInfo.getContainerID()));
+          ContainerID.valueOf(containerInfo.getContainerID()));
       throw ex;
     } finally {
       getLock().unlock();
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
index 228a657..391d2c5 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
@@ -56,7 +56,7 @@ public class ReconContainerReportHandler extends 
ContainerReportHandler {
 
     List<ContainerReplicaProto> reportsList = containerReport.getReportsList();
     for (ContainerReplicaProto containerReplicaProto : reportsList) {
-      final ContainerID id = ContainerID.valueof(
+      final ContainerID id = ContainerID.valueOf(
           containerReplicaProto.getContainerID());
       try {
         containerManager.checkAndAddNewContainer(id,
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 0262c8b..863ef46 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -70,7 +70,7 @@ public class ReconIncrementalContainerReportHandler
     for (ContainerReplicaProto replicaProto :
         report.getReport().getReportList()) {
       try {
-        final ContainerID id = ContainerID.valueof(
+        final ContainerID id = ContainerID.valueOf(
             replicaProto.getContainerID());
         try {
           containerManager.checkAndAddNewContainer(id, replicaProto.getState(),
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 6ba6f56..514f919 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -98,7 +98,7 @@ public class TestContainerEndpoint {
   private boolean isSetupDone = false;
   private ContainerSchemaManager containerSchemaManager;
   private ReconOMMetadataManager reconOMMetadataManager;
-  private ContainerID containerID = new ContainerID(1L);
+  private ContainerID containerID = ContainerID.valueOf(1L);
   private PipelineID pipelineID;
   private long keyCount = 5L;
 
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index 0a3546a..0bfa179 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -52,7 +52,7 @@ public class TestContainerHealthStatus {
     container = mock(ContainerInfo.class);
     when(container.getReplicationFactor())
         .thenReturn(HddsProtos.ReplicationFactor.THREE);
-    when(container.containerID()).thenReturn(new ContainerID(123456));
+    when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
     when(container.getContainerID()).thenReturn((long)123456);
     when(placementPolicy.validateContainerPlacement(
         Mockito.anyList(), Mockito.anyInt()))
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index d97b143..890c242 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -89,19 +89,19 @@ public class TestContainerHealthTask extends 
AbstractReconSqlDBTest {
       when(containerManagerMock.getContainer(c.containerID())).thenReturn(c);
     }
     // Under replicated
-    when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
         .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY));
 
     // return one UNHEALTHY replica for container ID 2 -> Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
         .thenReturn(getMockReplicas(2L, State.UNHEALTHY));
 
     // return 0 replicas for container ID 3 -> Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
         .thenReturn(Collections.emptySet());
 
     // Return 5 Healthy -> Over replicated
-    when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
         .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
         State.CLOSED, State.CLOSED, State.CLOSED));
 
@@ -110,11 +110,11 @@ public class TestContainerHealthTask extends 
AbstractReconSqlDBTest {
         State.CLOSED, State.CLOSED, State.CLOSED);
     placementMock.setMisRepWhenDnPresent(
         misReplicas.iterator().next().getDatanodeDetails().getUuid());
-    when(containerManagerMock.getContainerReplicas(new ContainerID(5L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L)))
         .thenReturn(misReplicas);
 
     // Return 3 Healthy -> Healthy container
-    when(containerManagerMock.getContainerReplicas(new ContainerID(6L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L)))
         .thenReturn(getMockReplicas(6L,
             State.CLOSED, State.CLOSED, State.CLOSED));
 
@@ -164,20 +164,20 @@ public class TestContainerHealthTask extends 
AbstractReconSqlDBTest {
     // Now run the job again, to check that relevant records are updated or
     // removed as appropriate. Need to adjust the return value for all the 
mocks
     // Under replicated -> Delta goes from 2 to 1
-    when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
         .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED));
 
     // ID 2 was missing - make it healthy now
-    when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
         .thenReturn(getMockReplicas(2L,
             State.CLOSED, State.CLOSED, State.CLOSED));
 
     // return 0 replicas for container ID 3 -> Still Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
         .thenReturn(Collections.emptySet());
 
     // Return 4 Healthy -> Delta changes from -2 to -1
-    when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
         .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
             State.CLOSED, State.CLOSED));
 
@@ -215,7 +215,7 @@ public class TestContainerHealthTask extends 
AbstractReconSqlDBTest {
       replicas.add(ContainerReplica.newBuilder()
           .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
           .setContainerState(s)
-          .setContainerID(new ContainerID(containerId))
+          .setContainerID(ContainerID.valueOf(containerId))
           .setSequenceId(1)
           .build());
     }
@@ -229,7 +229,7 @@ public class TestContainerHealthTask extends 
AbstractReconSqlDBTest {
       when(c.getContainerID()).thenReturn((long)i);
       when(c.getReplicationFactor())
           .thenReturn(HddsProtos.ReplicationFactor.THREE);
-      when(c.containerID()).thenReturn(new ContainerID(i));
+      when(c.containerID()).thenReturn(ContainerID.valueOf(i));
       containers.add(c);
     }
     return containers;
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
index 62baf12..ccc9de3 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
@@ -59,7 +59,7 @@ public class TestContainerHealthTaskRecordGenerator {
     container = mock(ContainerInfo.class);
     when(container.getReplicationFactor())
         .thenReturn(HddsProtos.ReplicationFactor.THREE);
-    when(container.containerID()).thenReturn(new ContainerID(123456));
+    when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
     when(container.getContainerID()).thenReturn((long)123456);
     when(placementPolicy.validateContainerPlacement(
         Mockito.anyList(), Mockito.anyInt()))
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 783f42c..a5ee0a2 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -113,7 +113,7 @@ public class AbstractReconContainerManagerTest {
     Pipeline pipeline = getRandomPipeline();
     getPipelineManager().addPipeline(pipeline);
 
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     ContainerInfo containerInfo =
         new ContainerInfo.Builder()
             .setContainerID(containerID.getId())
@@ -140,7 +140,7 @@ public class AbstractReconContainerManagerTest {
 
   protected ContainerWithPipeline getTestContainer(LifeCycleState state)
       throws IOException {
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     Pipeline pipeline = getRandomPipeline();
     pipelineManager.addPipeline(pipeline);
     ContainerInfo containerInfo =
@@ -159,7 +159,7 @@ public class AbstractReconContainerManagerTest {
   protected ContainerWithPipeline getTestContainer(long id,
                                                    LifeCycleState state)
       throws IOException {
-    ContainerID containerID = new ContainerID(id);
+    ContainerID containerID = ContainerID.valueOf(id);
     Pipeline pipeline = getRandomPipeline();
     pipelineManager.addPipeline(pipeline);
     ContainerInfo containerInfo =
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 9f47779..49a5f39 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -100,7 +100,7 @@ public class TestReconContainerManager
 
   @Test
   public void testCheckAndAddNewContainer() throws IOException {
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     ReconContainerManager containerManager = getContainerManager();
     assertFalse(containerManager.exists(containerID));
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 1b42f21..97eaf96 100644
--- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -62,7 +62,7 @@ public class TestReconIncrementalContainerReportHandler
   @Test
   public void testProcessICR() throws IOException, NodeNotFoundException {
 
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
     IncrementalContainerReportFromDatanode reportMock =
         mock(IncrementalContainerReportFromDatanode.class);


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: ozone-commits-h...@hadoop.apache.org

Reply via email to