http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
new file mode 100644
index 0000000..0801c25
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.placement;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+    .SCMContainerPlacementRandom;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
+    .HEALTHY;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Asserts that the allocation strategy works as expected.
+ */
+public class TestContainerPlacement {
+
+  private DescriptiveStatistics computeStatistics(NodeManager nodeManager) {
+    DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics();
+    for (DatanodeDetails dd : nodeManager.getNodes(HEALTHY)) {
+      float weightedValue =
+          nodeManager.getNodeStat(dd).get().getScmUsed().get() / (float)
+              nodeManager.getNodeStat(dd).get().getCapacity().get();
+      descriptiveStatistics.addValue(weightedValue);
+    }
+    return descriptiveStatistics;
+  }
+
+  /**
+   * This test simulates a lot of cluster I/O and updates the metadata in SCM.
+   * We simulate adding and removing containers from the cluster. It asserts
+   * that our placement algorithm has taken the capacity of nodes into
+   * consideration by asserting that the standard deviation of used space
+   * across these nodes has improved.
+   */
+  @Test
+  public void testCapacityPlacementYieldsBetterDataDistribution() throws
+      SCMException {
+    final int opsCount = 200 * 1000;
+    final int nodesRequired = 3;
+    Random random = new Random();
+
+    // The nature of init code in MockNodeManager yields similar clusters.
+    MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100);
+    MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100);
+    DescriptiveStatistics beforeCapacity =
+        computeStatistics(nodeManagerCapacity);
+    DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom);
+
+    // Assert that our initial layouts of the clusters are similar.
+    assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom
+        .getStandardDeviation(), 0.001);
+
+    SCMContainerPlacementCapacity capacityPlacer = new
+        SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration());
+    SCMContainerPlacementRandom randomPlacer = new
+        SCMContainerPlacementRandom(nodeManagerRandom, new Configuration());
+
+    for (int x = 0; x < opsCount; x++) {
+      long containerSize = random.nextInt(100) * OzoneConsts.GB;
+      List<DatanodeDetails> nodesCapacity =
+          capacityPlacer.chooseDatanodes(nodesRequired, containerSize);
+      assertEquals(nodesRequired, nodesCapacity.size());
+
+      List<DatanodeDetails> nodesRandom = randomPlacer.chooseDatanodes(nodesRequired,
+          containerSize);
+
+      // One fifth of all calls are deletes.
+      if (x % 5 == 0) {
+        deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize);
+        deleteContainer(nodeManagerRandom, nodesRandom, containerSize);
+      } else {
+        createContainer(nodeManagerCapacity, nodesCapacity, containerSize);
+        createContainer(nodeManagerRandom, nodesRandom, containerSize);
+      }
+    }
+    DescriptiveStatistics postCapacity = computeStatistics(nodeManagerCapacity);
+    DescriptiveStatistics postRandom = computeStatistics(nodeManagerRandom);
+
+    // This is a very bold claim, and needs a large number of I/O operations.
+    // The claim in this assertion is that we improved the data distribution
+    // of this cluster in relation to the start state of the cluster.
+    Assert.assertTrue(beforeCapacity.getStandardDeviation() >
+        postCapacity.getStandardDeviation());
+
+    // This asserts that capacity placement yields a better placement
+    // algorithm than random placement, since both clusters started from an
+    // identical state.
+
+    Assert.assertTrue(postRandom.getStandardDeviation() >
+        postCapacity.getStandardDeviation());
+  }
+
+  private void deleteContainer(MockNodeManager nodeManager,
+      List<DatanodeDetails> nodes, long containerSize) {
+    for (DatanodeDetails dd : nodes) {
+      nodeManager.delContainer(dd, containerSize);
+    }
+  }
+
+  private void createContainer(MockNodeManager nodeManager,
+      List<DatanodeDetails> nodes, long containerSize) {
+    for (DatanodeDetails dd : nodes) {
+      nodeManager.addContainer(dd, containerSize);
+    }
+  }
+}
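
A note on how the test above scores placement: it measures the spread of per-node utilization (scmUsed divided by capacity), and after many create/delete rounds a lower standard deviation means better balance. Below is a minimal standalone sketch of that scoring idea, using the same commons-math3 DescriptiveStatistics class the test imports, with made-up utilization ratios:

import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

public class UtilizationSpreadSketch {
  public static void main(String[] args) {
    // Hypothetical used/capacity ratios for two 4-node clusters.
    double[] balanced = {0.50, 0.52, 0.49, 0.51};
    double[] skewed = {0.20, 0.80, 0.35, 0.65};
    System.out.printf("balanced stddev = %.4f%n", stddev(balanced));
    System.out.printf("skewed   stddev = %.4f%n", stddev(skewed));
  }

  private static double stddev(double[] ratios) {
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (double ratio : ratios) {
      stats.addValue(ratio);
    }
    // Standard deviation of the utilization ratios; lower is more balanced.
    return stats.getStandardDeviation();
  }
}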

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
new file mode 100644
index 0000000..7150d1b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.placement;
+
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for the metrics that support placement.
+ */
+public class TestDatanodeMetrics {
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+  @Test
+  public void testSCMNodeMetric() {
+    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
+    assertEquals((long) stat.getCapacity().get(), 100L);
+    assertEquals((long) stat.getScmUsed().get(), 10L);
+    assertEquals((long) stat.getRemaining().get(), 90L);
+    SCMNodeMetric metric = new SCMNodeMetric(stat);
+
+    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
+    assertEquals((long) newStat.getCapacity().get(), 100L);
+    assertEquals((long) newStat.getScmUsed().get(), 10L);
+    assertEquals((long) newStat.getRemaining().get(), 90L);
+
+    SCMNodeMetric newMetric = new SCMNodeMetric(newStat);
+    assertTrue(metric.isEqual(newMetric.get()));
+
+    newMetric.add(stat);
+    assertTrue(newMetric.isGreater(metric.get()));
+
+    SCMNodeMetric zeroMetric = new SCMNodeMetric(new SCMNodeStat());
+    // Assert we can handle zero capacity.
+    assertTrue(metric.isGreater(zeroMetric.get()));
+  }
+}
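
For reference, a short sketch of how the stat and metric types exercised above fit together. It only uses the constructors and comparison calls the test itself makes; the behavior that a metric with more used space compares as greater is inferred from the test's assertions, not from a documented contract:

import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;

public class NodeMetricSketch {
  public static void main(String[] args) {
    // Arguments are capacity, scmUsed and remaining, as in the test above.
    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
    SCMNodeMetric metric = new SCMNodeMetric(stat);

    SCMNodeMetric accumulated = new SCMNodeMetric(new SCMNodeStat(100L, 10L, 90L));
    accumulated.add(stat);  // fold another stat sample into the metric

    System.out.println(metric.isEqual(metric.get()));         // true
    System.out.println(accumulated.isGreater(metric.get()));  // true per the test
  }
}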

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
new file mode 100644
index 0000000..8eb07e6
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
@@ -0,0 +1,272 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
+import org.apache.hadoop.hdds.scm.container.replication.InProgressPool;
+import org.apache.hadoop.hdds.scm.node.CommandQueue;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.container.testutils
+    .ReplicationDatanodeStateManager;
+import org.apache.hadoop.ozone.container.testutils.ReplicationNodeManagerMock;
+import org.apache.hadoop.ozone.container.testutils
+    .ReplicationNodePoolManagerMock;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
+    .HEALTHY;
+import static org.apache.ratis.shaded.com.google.common.util.concurrent
+    .Uninterruptibles.sleepUninterruptibly;
+
+/**
+ * Tests for the container supervisor.
+ */
+public class TestContainerSupervisor {
+  static final String POOL_NAME_TEMPLATE = "Pool%d";
+  static final int MAX_DATANODES = 72;
+  static final int POOL_SIZE = 24;
+  static final int POOL_COUNT = 3;
+  private LogCapturer logCapturer = LogCapturer.captureLogs(
+      LogFactory.getLog(ContainerSupervisor.class));
+  private List<DatanodeDetails> datanodes = new LinkedList<>();
+  private NodeManager nodeManager;
+  private NodePoolManager poolManager;
+  private CommandQueue commandQueue;
+  private ContainerSupervisor containerSupervisor;
+  private ReplicationDatanodeStateManager datanodeStateManager;
+
+  @After
+  public void tearDown() throws Exception {
+    logCapturer.stopCapturing();
+    GenericTestUtils.setLogLevel(ContainerSupervisor.LOG, Level.INFO);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    GenericTestUtils.setLogLevel(ContainerSupervisor.LOG, Level.DEBUG);
+    Map<DatanodeDetails, NodeState> nodeStateMap = new HashMap<>();
+    // We are setting up 3 pools with 24 nodes each in this cluster.
+    // First we create 72 Datanodes.
+    for (int x = 0; x < MAX_DATANODES; x++) {
+      DatanodeDetails datanode = TestUtils.getDatanodeDetails();
+      datanodes.add(datanode);
+      nodeStateMap.put(datanode, HEALTHY);
+    }
+
+    commandQueue = new CommandQueue();
+
+    // All nodes in this cluster are healthy for the time being.
+    nodeManager = new ReplicationNodeManagerMock(nodeStateMap, commandQueue);
+    poolManager = new ReplicationNodePoolManagerMock();
+
+    Assert.assertEquals("Max datanodes should be equal to POOL_SIZE * " +
+        "POOL_COUNT", POOL_COUNT * POOL_SIZE, MAX_DATANODES);
+
+    // Start from 1 so the pool names read Pool1..Pool3; each pool gets its
+    // own disjoint slice of POOL_SIZE datanodes.
+    for (int y = 1; y <= POOL_COUNT; y++) {
+      String poolName = String.format(POOL_NAME_TEMPLATE, y);
+      for (int z = 0; z < POOL_SIZE; z++) {
+        DatanodeDetails id = datanodes.get((y - 1) * POOL_SIZE + z);
+        poolManager.addNode(poolName, id);
+      }
+    }
+    OzoneConfiguration config = SCMTestUtils.getOzoneConf();
+    config.setTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT, 2,
+        TimeUnit.SECONDS);
+    config.setTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL, 1,
+        TimeUnit.SECONDS);
+    containerSupervisor = new ContainerSupervisor(config,
+        nodeManager, poolManager);
+    datanodeStateManager = new ReplicationDatanodeStateManager(nodeManager,
+        poolManager);
+    // Sleep for one second to make sure all threads get time to run.
+    sleepUninterruptibly(1, TimeUnit.SECONDS);
+  }
+
+  @Test
+  /**
+   * Asserts that at least one pool is picked up for processing.
+   */
+  public void testAssertPoolsAreProcessed() {
+    // This asserts that replication manager has started processing at least
+    // one pool.
+    Assert.assertTrue(containerSupervisor.getInProgressPoolCount() > 0);
+
+    // Since all datanodes are flagged as healthy in this test, for each
+    // datanode we must have queued a command.
+    Assert.assertEquals("Commands are in queue :",
+        POOL_SIZE * containerSupervisor.getInProgressPoolCount(),
+        commandQueue.getCommandsInQueue());
+  }
+
+  @Test
+  /**
+   * This test sends container reports for 2 containers to a pool in progress.
+   * Asserts that we are able to find a container with single replica and do
+   * not find container with 3 replicas.
+   */
+  public void testDetectSingleContainerReplica() throws TimeoutException,
+      InterruptedException {
+    String singleNodeContainer = "SingleNodeContainer";
+    String threeNodeContainer = "ThreeNodeContainer";
+    InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
+    // Only a single datanode reports that "SingleNodeContainer" exists.
+    List<ContainerReportsRequestProto> clist =
+        datanodeStateManager.getContainerReport(singleNodeContainer,
+            ppool.getPool().getPoolName(), 1);
+    ppool.handleContainerReport(clist.get(0));
+
+    // Three nodes are going to report that ThreeNodeContainer exists.
+    clist = datanodeStateManager.getContainerReport(threeNodeContainer,
+        ppool.getPool().getPoolName(), 3);
+
+    for (ContainerReportsRequestProto reportsProto : clist) {
+      ppool.handleContainerReport(reportsProto);
+    }
+    GenericTestUtils.waitFor(() -> ppool.getContainerProcessedCount() == 4,
+        200, 1000);
+    ppool.setDoneProcessing();
+
+    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
+        .getValue() == 1);
+    Assert.assertEquals(singleNodeContainer, containers.get(0).getKey());
+    int count = containers.get(0).getValue();
+    Assert.assertEquals(1L, count);
+  }
+
+  @Test
+  /**
+   * We create three containers: Normal, OverReplicated and WayOverReplicated.
+   * This test asserts that we are able to find the
+   * over-replicated containers.
+   */
+  public void testDetectOverReplica() throws TimeoutException,
+      InterruptedException {
+    String normalContainer = "NormalContainer";
+    String overReplicated = "OverReplicatedContainer";
+    String wayOverReplicated = "WayOverReplicated";
+    InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
+
+    List<ContainerReportsRequestProto> clist =
+        datanodeStateManager.getContainerReport(normalContainer,
+            ppool.getPool().getPoolName(), 3);
+    ppool.handleContainerReport(clist.get(0));
+
+    clist = datanodeStateManager.getContainerReport(overReplicated,
+        ppool.getPool().getPoolName(), 4);
+
+    for (ContainerReportsRequestProto reportsProto : clist) {
+      ppool.handleContainerReport(reportsProto);
+    }
+
+    clist = datanodeStateManager.getContainerReport(wayOverReplicated,
+        ppool.getPool().getPoolName(), 7);
+
+    for (ContainerReportsRequestProto reportsProto : clist) {
+      ppool.handleContainerReport(reportsProto);
+    }
+
+    // We ignore container reports from the same datanodes.
+    // It is possible that each of these containers gets placed
+    // on the same datanodes, so allow for 4 duplicates in the set of 14.
+    GenericTestUtils.waitFor(() -> ppool.getContainerProcessedCount() > 10,
+        200, 1000);
+    ppool.setDoneProcessing();
+
+    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
+        .getValue() > 3);
+    Assert.assertEquals(2, containers.size());
+  }
+
+  @Test
+  /**
+   * This test verifies that all pools are picked up for replica processing.
+   *
+   */
+  public void testAllPoolsAreProcessed() throws TimeoutException,
+      InterruptedException {
+    // Verify that we saw all three pools being picked up for processing.
+    GenericTestUtils.waitFor(() -> containerSupervisor.getPoolProcessCount()
+        >= 3, 200, 15 * 1000);
+    Assert.assertTrue(logCapturer.getOutput().contains("Pool1") &&
+        logCapturer.getOutput().contains("Pool2") &&
+        logCapturer.getOutput().contains("Pool3"));
+  }
+
+  @Test
+  /**
+   * Adds a new pool and tests that we are able to pick up that new pool for
+   * processing as well as handle container reports for datanodes in that pool.
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  public void testAddingNewPoolWorks()
+      throws TimeoutException, InterruptedException, IOException {
+    LogCapturer inProgressLog = LogCapturer.captureLogs(
+        LogFactory.getLog(InProgressPool.class));
+    GenericTestUtils.setLogLevel(InProgressPool.LOG, Level.DEBUG);
+    try {
+      DatanodeDetails id = TestUtils.getDatanodeDetails();
+      ((ReplicationNodeManagerMock) (nodeManager)).addNode(id, HEALTHY);
+      poolManager.addNode("PoolNew", id);
+      GenericTestUtils.waitFor(() ->
+              logCapturer.getOutput().contains("PoolNew"),
+          200, 15 * 1000);
+
+      // Assert that we are able to send a container report to this new
+      // pool and datanode.
+      List<ContainerReportsRequestProto> clist =
+          datanodeStateManager.getContainerReport("NewContainer1",
+              "PoolNew", 1);
+      containerSupervisor.handleContainerReport(clist.get(0));
+      GenericTestUtils.waitFor(() ->
+          inProgressLog.getOutput().contains("NewContainer1") && inProgressLog
+              .getOutput().contains(id.getUuidString()),
+          200, 10 * 1000);
+    } finally {
+      inProgressLog.stopCapturing();
+    }
+  }
+}
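
The two detection tests above reduce to one pattern: tally replica counts per container from the incoming reports, then filter the tallies with a predicate. A standalone sketch of that tally-and-filter shape, with a plain HashMap standing in for InProgressPool:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class ReplicaFilterSketch {
  // Mirrors the shape of InProgressPool#filterContainer as the tests use it:
  // select the (container, replicaCount) entries matching a predicate.
  static List<Map.Entry<String, Integer>> filterContainer(
      Map<String, Integer> counts,
      Predicate<Map.Entry<String, Integer>> predicate) {
    return counts.entrySet().stream()
        .filter(predicate)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    Map<String, Integer> counts = new HashMap<>();
    counts.put("SingleNodeContainer", 1);
    counts.put("NormalContainer", 3);
    counts.put("OverReplicatedContainer", 4);
    counts.put("WayOverReplicated", 7);

    // Containers with exactly one replica (potential data-loss risk).
    System.out.println(filterContainer(counts, p -> p.getValue() == 1));
    // Containers with more than three replicas (over-replicated).
    System.out.println(filterContainer(counts, p -> p.getValue() > 3));
  }
}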

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..318c54d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+// Test classes for replication.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
new file mode 100644
index 0000000..26f3514
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
+    .HEALTHY;
+
+/**
+ * This class manages the state of datanodes
+ * in conjunction with the node pool and node managers.
+ */
+public class ReplicationDatanodeStateManager {
+  private final NodeManager nodeManager;
+  private final NodePoolManager poolManager;
+  private final Random r;
+
+  /**
+   * Constructs the datanode state manager.
+   *
+   * @param nodeManager - node manager used to look up node states.
+   * @param poolManager - pool manager used to look up pool membership.
+   */
+  public ReplicationDatanodeStateManager(NodeManager nodeManager,
+      NodePoolManager poolManager) {
+    this.nodeManager = nodeManager;
+    this.poolManager = poolManager;
+    r = new Random();
+  }
+
+  /**
+   * Get container reports as if they are from datanodes in the cluster.
+   * @param containerName - Container Name.
+   * @param poolName - Pool Name.
+   * @param dataNodeCount - Datanode Count.
+   * @return List of Container Reports.
+   */
+  public List<ContainerReportsRequestProto> getContainerReport(
+      String containerName, String poolName, int dataNodeCount) {
+    List<ContainerReportsRequestProto> containerList = new LinkedList<>();
+    List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);
+
+    if (nodesInPool == null) {
+      return containerList;
+    }
+
+    if (nodesInPool.size() < dataNodeCount) {
+      throw new IllegalStateException("Not enough datanodes to create " +
+          "required container reports");
+    }
+
+    int containerID = 1;
+    while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
+      DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
+      nodesInPool.remove(id);
+      containerID++;
+      // We return container reports only for nodes that are healthy.
+      if (nodeManager.getNodeState(id) == HEALTHY) {
+        ContainerInfo info = ContainerInfo.newBuilder()
+            .setContainerName(containerName)
+            .setFinalhash(DigestUtils.sha256Hex(containerName))
+            .setContainerID(containerID)
+            .build();
+        ContainerReportsRequestProto containerReport =
+            ContainerReportsRequestProto.newBuilder().addReports(info)
+            .setDatanodeDetails(id.getProtoBufMessage())
+            .setType(ContainerReportsRequestProto.reportType.fullReport)
+            .build();
+        containerList.add(containerReport);
+      }
+    }
+    return containerList;
+  }
+}
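
getContainerReport picks its reporting datanodes by sampling the pool without replacement: choose a random element, remove it, repeat. Mutating the list is safe here because the mock pool manager builds a fresh list on every getNodes call. The same idea as a generic helper, with hypothetical node names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

public class SampleWithoutReplacement {
  // Draw up to k distinct elements by removing random picks from a working
  // copy, mirroring the pick-and-remove loop in getContainerReport above.
  static <T> List<T> sample(List<T> input, int k, Random random) {
    List<T> pool = new ArrayList<>(input);  // copy; the caller's list survives
    List<T> picked = new ArrayList<>();
    while (picked.size() < k && !pool.isEmpty()) {
      picked.add(pool.remove(random.nextInt(pool.size())));
    }
    return picked;
  }

  public static void main(String[] args) {
    List<String> nodes = Arrays.asList("dn1", "dn2", "dn3", "dn4");
    System.out.println(sample(nodes, 2, new Random(42)));
  }
}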

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
new file mode 100644
index 0000000..f2db751
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.node.CommandQueue;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ReportState;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.ozone.protocol.VersionResponse;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+/**
+ * A Node Manager to test replication.
+ */
+public class ReplicationNodeManagerMock implements NodeManager {
+  private final Map<DatanodeDetails, NodeState> nodeStateMap;
+  private final CommandQueue commandQueue;
+
+  /**
+   * Creates a mock over a map of datanodes and their current states.
+   *
+   * @param nodeState A node state map.
+   * @param commandQueue The command queue that holds datanode commands.
+   */
+  public ReplicationNodeManagerMock(Map<DatanodeDetails, NodeState> nodeState,
+                                    CommandQueue commandQueue) {
+    Preconditions.checkNotNull(nodeState);
+    this.nodeStateMap = nodeState;
+    this.commandQueue = commandQueue;
+  }
+
+  /**
+   * Get the minimum number of nodes to get out of chill mode.
+   *
+   * @return int
+   */
+  @Override
+  public int getMinimumChillModeNodes() {
+    return 0;
+  }
+
+  /**
+   * Returns a chill mode status string.
+   *
+   * @return String
+   */
+  @Override
+  public String getChillModeStatus() {
+    return null;
+  }
+
+  /**
+   * Get the number of data nodes in all states.
+   *
+   * @return A mapping from node state to the number of nodes in that state
+   */
+  @Override
+  public Map<String, Integer> getNodeCount() {
+    return null;
+  }
+
+  /**
+   * Removes a data node from the management of this Node Manager.
+   *
+   * @param node - DataNode.
+   * @throws UnregisteredNodeException
+   */
+  @Override
+  public void removeNode(DatanodeDetails node)
+      throws UnregisteredNodeException {
+    nodeStateMap.remove(node);
+
+  }
+
+  /**
+   * Gets all Live Datanodes that are currently communicating with SCM.
+   *
+   * @param nodestate - State of the node
+   * @return List of Datanodes that are Heartbeating SCM.
+   */
+  @Override
+  public List<DatanodeDetails> getNodes(NodeState nodestate) {
+    return null;
+  }
+
+  /**
+   * Returns the Number of Datanodes that are communicating with SCM.
+   *
+   * @param nodestate - State of the node
+   * @return int -- count
+   */
+  @Override
+  public int getNodeCount(NodeState nodestate) {
+    return 0;
+  }
+
+  /**
+   * Get all datanodes known to SCM.
+   *
+   * @return List of DatanodeDetails known to SCM.
+   */
+  @Override
+  public List<DatanodeDetails> getAllNodes() {
+    return null;
+  }
+
+  /**
+   * Chill mode is the period when node manager waits for a minimum
+   * configured number of datanodes to report in. This is called chill mode
+   * to indicate the period before node manager gets into action.
+   * <p>
+   * Forcefully exits the chill mode, even if we have not met the minimum
+   * criteria of the nodes reporting in.
+   */
+  @Override
+  public void forceExitChillMode() {
+
+  }
+
+  /**
+   * Puts the node manager into manual chill mode.
+   */
+  @Override
+  public void enterChillMode() {
+
+  }
+
+  /**
+   * Brings node manager out of manual chill mode.
+   */
+  @Override
+  public void exitChillMode() {
+
+  }
+
+  /**
+   * Returns true if node manager is out of chill mode, else false.
+   * @return true if out of chill mode, else false
+   */
+  @Override
+  public boolean isOutOfChillMode() {
+    return !nodeStateMap.isEmpty();
+  }
+
+  /**
+   * Returns the aggregated node stats.
+   *
+   * @return the aggregated node stats.
+   */
+  @Override
+  public SCMNodeStat getStats() {
+    return null;
+  }
+
+  /**
+   * Return a map of node stats.
+   *
+   * @return a map of individual node stats (live/stale but not dead).
+   */
+  @Override
+  public Map<UUID, SCMNodeStat> getNodeStats() {
+    return null;
+  }
+
+  /**
+   * Return the node stat of the specified datanode.
+   *
+   * @param dd - datanode details.
+   * @return node stat if it is live/stale, null if it is dead or doesn't exist.
+   */
+  @Override
+  public SCMNodeMetric getNodeStat(DatanodeDetails dd) {
+    return null;
+  }
+
+  @Override
+  public NodePoolManager getNodePoolManager() {
+    return Mockito.mock(NodePoolManager.class);
+  }
+
+  /**
+   * Wait until the heartbeat is processed by the NodeManager.
+   *
+   * @return true if heartbeat has been processed.
+   */
+  @Override
+  public boolean waitForHeartbeatProcessed() {
+    return false;
+  }
+
+  /**
+   * Returns the node state of a specific node.
+   *
+   * @param dd - DatanodeDetails
+   * @return Healthy/Stale/Dead.
+   */
+  @Override
+  public NodeState getNodeState(DatanodeDetails dd) {
+    return nodeStateMap.get(dd);
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   * <p>
+   * <p> As noted in {@link AutoCloseable#close()}, cases where the
+   * close may fail require careful attention. It is strongly advised
+   * to relinquish the underlying resources and to internally
+   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+   * the {@code IOException}.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  /**
+   * When an object implementing interface <code>Runnable</code> is used
+   * to create a thread, starting the thread causes the object's
+   * <code>run</code> method to be called in that separately executing
+   * thread.
+   * <p>
+   * The general contract of the method <code>run</code> is that it may
+   * take any action whatsoever.
+   *
+   * @see Thread#run()
+   */
+  @Override
+  public void run() {
+
+  }
+
+  /**
+   * Gets the version info from SCM.
+   *
+   * @param versionRequest - version Request.
+   * @return - returns SCM version info and other required information needed by
+   * datanode.
+   */
+  @Override
+  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
+    return null;
+  }
+
+  /**
+   * Register the node if the node finds that it is not registered with any SCM.
+   *
+   * @param dd DatanodeDetailsProto
+   *
+   * @return SCMHeartbeatResponseProto
+   */
+  @Override
+  public SCMCommand register(HddsProtos.DatanodeDetailsProto dd) {
+    return null;
+  }
+
+  /**
+   * Send heartbeat to indicate the datanode is alive and doing well.
+   *
+   * @param dd - Datanode Details.
+   * @param nodeReport - node report.
+   * @param containerReportState - container report state.
+   * @return SCMheartbeat response list
+   */
+  @Override
+  public List<SCMCommand> sendHeartbeat(HddsProtos.DatanodeDetailsProto dd,
+      SCMNodeReport nodeReport, ReportState containerReportState) {
+    return null;
+  }
+
+  /**
+   * Clears all nodes from the node Manager.
+   */
+  public void clearMap() {
+    this.nodeStateMap.clear();
+  }
+
+  /**
+   * Adds a node to the existing Node manager. This is used only for test
+   * purposes.
+   * @param id DatanodeDetails
+   * @param state State you want to put that node to.
+   */
+  public void addNode(DatanodeDetails id, NodeState state) {
+    nodeStateMap.put(id, state);
+  }
+
+  @Override
+  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
+    this.commandQueue.addCommand(dnId, command);
+  }
+
+}
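
A short usage sketch for this mock: seed it with a state map and a CommandQueue, then flip a node's state through the test-only addNode helper. TestUtils.getDatanodeDetails() is the factory the tests in this commit use; the STALE value is assumed from the Healthy/Stale/Dead states named in getNodeState's javadoc:

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.node.CommandQueue;
import org.apache.hadoop.ozone.container.testutils.ReplicationNodeManagerMock;

import java.util.HashMap;
import java.util.Map;

public class NodeManagerMockSketch {
  public static void main(String[] args) {
    // Seed the mock with one healthy datanode, as TestContainerSupervisor does.
    Map<DatanodeDetails, NodeState> states = new HashMap<>();
    DatanodeDetails dn = TestUtils.getDatanodeDetails();
    states.put(dn, NodeState.HEALTHY);

    ReplicationNodeManagerMock nodes =
        new ReplicationNodeManagerMock(states, new CommandQueue());
    System.out.println(nodes.getNodeState(dn)); // HEALTHY

    // Flip the node's state through the test-only helper.
    nodes.addNode(dn, NodeState.STALE);
    System.out.println(nodes.getNodeState(dn)); // STALE
  }
}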

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
new file mode 100644
index 0000000..ffcd752
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Pool Manager replication mock.
+ */
+public class ReplicationNodePoolManagerMock implements NodePoolManager {
+
+  private final Map<DatanodeDetails, String> nodeMemberShip;
+
+  /**
+   * A node pool manager for testing.
+   */
+  public ReplicationNodePoolManagerMock() {
+    nodeMemberShip = new HashMap<>();
+  }
+
+  /**
+   * Add a node to a node pool.
+   *
+   * @param pool - name of the node pool.
+   * @param node - data node.
+   */
+  @Override
+  public void addNode(String pool, DatanodeDetails node) {
+    nodeMemberShip.put(node, pool);
+  }
+
+  /**
+   * Remove a node from a node pool.
+   *
+   * @param pool - name of the node pool.
+   * @param node - data node.
+   * @throws SCMException
+   */
+  @Override
+  public void removeNode(String pool, DatanodeDetails node)
+      throws SCMException {
+    nodeMemberShip.remove(node);
+
+  }
+
+  /**
+   * Get a list of known node pools.
+   *
+   * @return a list of known node pool names or an empty list if no node pool
+   * is defined.
+   */
+  @Override
+  public List<String> getNodePools() {
+    Set<String> poolSet = new HashSet<>();
+    for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
+      poolSet.add(entry.getValue());
+    }
+    return new ArrayList<>(poolSet);
+
+  }
+
+  /**
+   * Get all nodes of a node pool given the name of the node pool.
+   *
+   * @param pool - name of the node pool.
+   * @return a list of datanode ids or an empty list if the node pool was not
+   * found.
+   */
+  @Override
+  public List<DatanodeDetails> getNodes(String pool) {
+    Set<DatanodeDetails> datanodeSet = new HashSet<>();
+    for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
+      if (entry.getValue().equals(pool)) {
+        datanodeSet.add(entry.getKey());
+      }
+    }
+    return new ArrayList<>(datanodeSet);
+  }
+
+  /**
+   * Get the node pool name if the node has been added to a node pool.
+   *
+   * @param datanodeDetails DatanodeDetails.
+   * @return node pool name if it has been assigned. null if the node has not
+   * been assigned to any node pool yet.
+   */
+  @Override
+  public String getNodePool(DatanodeDetails datanodeDetails) {
+    return nodeMemberShip.get(datanodeDetails);
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   * <p>
+   * <p> As noted in {@link AutoCloseable#close()}, cases where the
+   * close may fail require careful attention. It is strongly advised
+   * to relinquish the underlying resources and to internally
+   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+   * the {@code IOException}.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override
+  public void close() throws IOException {
+
+  }
+}
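
Design note on the mock above: it keeps a single node-to-pool map and derives every pool-side query by scanning it, trading O(n) lookups for having exactly one structure to keep consistent. The same trade-off in isolation, with plain strings standing in for DatanodeDetails:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MembershipMapSketch {
  public static void main(String[] args) {
    // One node -> pool map, as in ReplicationNodePoolManagerMock.
    Map<String, String> membership = new HashMap<>();
    membership.put("dn-1", "Pool1");
    membership.put("dn-2", "Pool1");
    membership.put("dn-3", "Pool2");

    // getNodes(pool) is derived by scanning the whole map.
    List<String> pool1 = new ArrayList<>();
    for (Map.Entry<String, String> entry : membership.entrySet()) {
      if (entry.getValue().equals("Pool1")) {
        pool1.add(entry.getKey());
      }
    }
    System.out.println(pool1);                   // [dn-1, dn-2] in some order
    System.out.println(membership.get("dn-3"));  // getNodePool: Pool2
  }
}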

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
new file mode 100644
index 0000000..4e8a90b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+// Helper classes for ozone and container tests.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
new file mode 100644
index 0000000..b2823ef
--- /dev/null
+++ b/hadoop-hdds/tools/pom.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>hadoop-hdds-tools</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
+  <description>Apache HDDS Tools</description>
+  <name>Apache Hadoop HDDS tools</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>hdds</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.xerial</groupId>
+      <artifactId>sqlite-jdbc</artifactId>
+      <version>3.8.7</version>
+    </dependency>
+  </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
new file mode 100644
index 0000000..727c81a
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.Tool;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+/**
+ * This class is the base CLI for scm, ksm and scmadm.
+ */
+public abstract class OzoneBaseCLI extends Configured implements Tool {
+
+  protected abstract int dispatch(CommandLine cmd, Options opts)
+      throws IOException, URISyntaxException;
+
+  protected abstract CommandLine parseArgs(String[] argv, Options opts)
+      throws ParseException;
+
+  protected abstract Options getOptions();
+
+  protected abstract void displayHelp();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java
new file mode 100644
index 0000000..f9b8fcd
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+
+import java.io.IOException;
+import java.io.PrintStream;
+
+/**
+ * The abstract class of all SCM CLI commands.
+ */
+public abstract class OzoneCommandHandler {
+
+  private ScmClient scmClient;
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
+
+  /**
+   * Constructs a handler object.
+   */
+  public OzoneCommandHandler(ScmClient scmClient) {
+    this.scmClient = scmClient;
+  }
+
+  protected ScmClient getScmClient() {
+    return scmClient;
+  }
+
+  /**
+   * Sets a customized output stream to redirect stdout elsewhere.
+   *
+   * @param out the stream to print standard output to.
+   */
+  public void setOut(PrintStream out) {
+    this.out = out;
+  }
+
+  /**
+   * Sets a customized error stream to redirect stderr elsewhere.
+   *
+   * @param err the stream to print error output to.
+   */
+  public void setErr(PrintStream err) {
+    this.err = err;
+  }
+
+  public void logOut(String msg, String... variable) {
+    this.out.println(String.format(msg, variable));
+  }
+
+  /**
+   * Executes the Client command.
+   *
+   * @param cmd - CommandLine.
+   * @throws IOException throws exception.
+   */
+  public abstract void execute(CommandLine cmd) throws IOException;
+
+  /**
+   * Display a help message describing the options the command takes.
+   * TODO : currently only prints to standard out, may want to change this.
+   */
+  public abstract void displayHelp();
+
+  public PrintStream getOut() {
+    return out;
+  }
+
+  public PrintStream getErr() {
+    return err;
+  }
+}
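
A hypothetical subclass, purely to illustrate the extension pattern this class defines (the constructor takes the ScmClient, execute() does the work, displayHelp() prints usage). It is not one of the handlers added in this commit:

import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.hdds.scm.client.ScmClient;

import java.io.IOException;

public class ExampleHandler extends OzoneCommandHandler {

  public ExampleHandler(ScmClient scmClient) {
    super(scmClient);
  }

  @Override
  public void execute(CommandLine cmd) throws IOException {
    // A real handler would read its options from cmd and drive getScmClient().
    logOut("example command invoked with %s extra args",
        String.valueOf(cmd.getArgs().length));
  }

  @Override
  public void displayHelp() {
    getOut().println("usage: hdfs scmcli -example [<options>]");
  }
}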

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java
new file mode 100644
index 0000000..27df88c
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli;
+
+/**
+ * The possible result code of SCM CLI.
+ */
+public final class ResultCode {
+  public static final int SUCCESS = 1;
+
+  public static final int UNRECOGNIZED_CMD = 2;
+
+  public static final int EXECUTION_ERROR = 3;
+
+  private ResultCode() {}
+}
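
Note that SUCCESS here is 1 rather than the conventional process exit status of 0, so anything that surfaces these constants as exit codes has to map them. A trivial, hypothetical illustration of returning the constants from a run()-style method:

import static org.apache.hadoop.hdds.scm.cli.ResultCode.SUCCESS;
import static org.apache.hadoop.hdds.scm.cli.ResultCode.UNRECOGNIZED_CMD;

public class ResultCodeDemo {
  // Hypothetical outcome mapping, not the commit's dispatch logic.
  static int run(boolean recognized) {
    return recognized ? SUCCESS : UNRECOGNIZED_CMD;
  }

  public static void main(String[] args) {
    System.out.println(run(true));   // 1
    System.out.println(run(false));  // 2
  }
}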

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
new file mode 100644
index 0000000..34553ed
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli;
+
+import org.apache.commons.cli.BasicParser;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.cli.container.ContainerCommandHandler;
+import org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler;
+import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
+import static org.apache.hadoop.hdds.scm.cli.ResultCode.SUCCESS;
+import static org.apache.hadoop.hdds.scm.cli.ResultCode.UNRECOGNIZED_CMD;
+
+/**
+ * This class is the CLI of SCM.
+ */
+public class SCMCLI extends OzoneBaseCLI {
+
+  public static final String HELP_OP = "help";
+  public static final int CMD_WIDTH = 80;
+
+  private final ScmClient scmClient;
+  private final PrintStream out;
+  private final PrintStream err;
+
+  private final Options options;
+
+  public SCMCLI(ScmClient scmClient) {
+    this(scmClient, System.out, System.err);
+  }
+
+  public SCMCLI(ScmClient scmClient, PrintStream out, PrintStream err) {
+    this.scmClient = scmClient;
+    this.out = out;
+    this.err = err;
+    this.options = getOptions();
+  }
+
+  /**
+   * Main entry point for the SCM shell command handling.
+   *
+   * @param argv - System args String[]
+   * @throws Exception if command execution fails
+   */
+  public static void main(String[] argv) throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    ScmClient scmClient = getScmClient(conf);
+    SCMCLI shell = new SCMCLI(scmClient);
+    conf.setQuietMode(false);
+    shell.setConf(conf);
+    int res = 0;
+    try {
+      res = ToolRunner.run(shell, argv);
+    } catch (Exception ex) {
+      System.err.println("Command execution failed: " + ex.getMessage());
+      System.exit(1);
+    }
+    System.exit(res);
+  }
+
+  private static ScmClient getScmClient(OzoneConfiguration ozoneConf)
+      throws IOException {
+    long version = RPC.getProtocolVersion(
+        StorageContainerLocationProtocolPB.class);
+    InetSocketAddress scmAddress =
+        getScmAddressForClients(ozoneConf);
+    int containerSizeGB = ozoneConf.getInt(OZONE_SCM_CONTAINER_SIZE_GB,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    ContainerOperationClient.setContainerSizeB(
+        containerSizeGB * OzoneConsts.GB);
+
+    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    StorageContainerLocationProtocolClientSideTranslatorPB client =
+        new StorageContainerLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
+                scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf,
+                NetUtils.getDefaultSocketFactory(ozoneConf),
+                Client.getRpcTimeout(ozoneConf)));
+    ScmClient storageClient = new ContainerOperationClient(
+        client, new XceiverClientManager(ozoneConf));
+    return storageClient;
+  }
+
+  /**
+   * Adds ALL the options that the hdfs scm command supports. Given the
+   * hierarchy of commands, the options are added in a cascading manner, e.g.:
+   * {@link SCMCLI} asks {@link ContainerCommandHandler} to add its options,
+   * which then asks its sub commands, such as
+   * {@link CreateContainerHandler},
+   * to add their own options.
+   *
+   * We need to do this because {@link BasicParser} needs to take all the
+   * options when parsing args.
+   * @return ALL the options supported by this CLI.
+   */
+  @Override
+  protected Options getOptions() {
+    Options newOptions = new Options();
+    // add the options
+    addTopLevelOptions(newOptions);
+    ContainerCommandHandler.addOptions(newOptions);
+    // TODO : add pool, node and pipeline commands.
+    addHelpOption(newOptions);
+    return newOptions;
+  }
+
+  private static void addTopLevelOptions(Options options) {
+    Option containerOps = new Option(
+        ContainerCommandHandler.CONTAINER_CMD, false,
+        "Container related options");
+    options.addOption(containerOps);
+    // TODO : add pool, node and pipeline commands.
+  }
+
+  private static void addHelpOption(Options options) {
+    Option helpOp = new Option(HELP_OP, false, "display help message");
+    options.addOption(helpOp);
+  }
+
+  @Override
+  protected void displayHelp() {
+    HelpFormatter helpFormatter = new HelpFormatter();
+    Options topLevelOptions = new Options();
+    addTopLevelOptions(topLevelOptions);
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scmcli <commands> [<options>]",
+        "where <commands> can be one of the following",
+        topLevelOptions, "");
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    CommandLine cmd = parseArgs(args, options);
+    if (cmd == null) {
+      err.println("Unrecognized options:" + Arrays.asList(args));
+      displayHelp();
+      return UNRECOGNIZED_CMD;
+    }
+    return dispatch(cmd, options);
+  }
+
+  /**
+   * This function parses all command line arguments
+   * and returns the appropriate values.
+   *
+   * @param argv - Argv from main
+   * @param opts - Options supported by this CLI
+   *
+   * @return the parsed CommandLine, or null if the args cannot be parsed
+   */
+  @Override
+  protected CommandLine parseArgs(String[] argv, Options opts)
+      throws ParseException {
+    try {
+      BasicParser parser = new BasicParser();
+      return parser.parse(opts, argv);
+    } catch (ParseException ex) {
+      err.println(ex.getMessage());
+    }
+    return null;
+  }
+
+  @Override
+  protected int dispatch(CommandLine cmd, Options opts)
+      throws IOException, URISyntaxException {
+    OzoneCommandHandler handler = null;
+    try {
+      if (cmd.hasOption(ContainerCommandHandler.CONTAINER_CMD)) {
+        handler = new ContainerCommandHandler(scmClient);
+      }
+
+      if (handler == null) {
+        if (cmd.hasOption(HELP_OP)) {
+          displayHelp();
+          return SUCCESS;
+        } else {
+          displayHelp();
+          err.println("Unrecognized command: " + Arrays.asList(cmd.getArgs()));
+          return UNRECOGNIZED_CMD;
+        }
+      } else {
+        // Redirect stdout and stderr if necessary.
+        handler.setOut(this.out);
+        handler.setErr(this.err);
+        handler.execute(cmd);
+        return SUCCESS;
+      }
+    } catch (IOException ioe) {
+      err.println("Error executing command:" + ioe);
+      return EXECUTION_ERROR;
+    }
+  }
+}
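
To make the cascading option registration described in getOptions() above
concrete, here is a minimal sketch of how a future top-level command group
(the TODOs mention pool, node and pipeline commands) could plug in.
NodeCommandHandler and its NODE_CMD constant are hypothetical names, not
part of this patch:

    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;

    // Hypothetical command group, mirroring ContainerCommandHandler.
    public class NodeCommandHandler {
      public static final String NODE_CMD = "node"; // illustrative constant

      // SCMCLI#getOptions() would call this so that BasicParser sees
      // every option of every (sub) command before parsing starts.
      public static void addOptions(Options options) {
        options.addOption(new Option(NODE_CMD, false, "Node related options"));
        // Each sub command handler would cascade its own options here too.
      }
    }

SCMCLI#dispatch would then instantiate the handler whenever
cmd.hasOption(NODE_CMD) is true, exactly as it already does for -container.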

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
new file mode 100644
index 0000000..ba42023
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.cli.SCMCLI;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+
+import java.io.IOException;
+
+/**
+ * The handler of the close container command.
+ */
+public class CloseContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_CLOSE = "close";
+  public static final String OPT_CONTAINER_NAME = "c";
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_CLOSE)) {
+      throw new IOException("Expecting container close");
+    }
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(SCMCLI.HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    if (pipeline == null) {
+      throw new IOException("Cannot close an non-exist container "
+          + containerName);
+    }
+    logOut("Closing container : %s.", containerName);
+    getScmClient().closeContainer(pipeline);
+    logOut("Container closed.");
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(SCMCLI.CMD_WIDTH,
+        "hdfs scm -container -close <option>",
+        "where <option> is", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
+        true, "Specify container name");
+    options.addOption(containerNameOpt);
+  }
+
+  CloseContainerHandler(ScmClient client) {
+    super(client);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
new file mode 100644
index 0000000..980388f
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
+import static org.apache.hadoop.hdds.scm.cli.container.CloseContainerHandler
+    .CONTAINER_CLOSE;
+import static org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler
+    .CONTAINER_CREATE;
+import static org.apache.hadoop.hdds.scm.cli.container.DeleteContainerHandler
+    .CONTAINER_DELETE;
+import static org.apache.hadoop.hdds.scm.cli.container.InfoContainerHandler
+    .CONTAINER_INFO;
+import static org.apache.hadoop.hdds.scm.cli.container.ListContainerHandler
+    .CONTAINER_LIST;
+
+/**
+ * The handler class of container-specific commands, e.g. create,
+ * delete, info, list and close.
+ */
+public class ContainerCommandHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_CMD = "container";
+
+  public ContainerCommandHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    // all container commands should contain -container option
+    if (!cmd.hasOption(CONTAINER_CMD)) {
+      throw new IOException("Expecting container cmd");
+    }
+    // check which sub command it is
+    OzoneCommandHandler handler = null;
+    if (cmd.hasOption(CONTAINER_CREATE)) {
+      handler = new CreateContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_DELETE)) {
+      handler = new DeleteContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_INFO)) {
+      handler = new InfoContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_LIST)) {
+      handler = new ListContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_CLOSE)) {
+      handler = new CloseContainerHandler(getScmClient());
+    }
+
+    // execute the sub command, throw exception if no sub command found
+    // unless -help option is given.
+    if (handler != null) {
+      handler.setOut(this.getOut());
+      handler.setErr(this.getErr());
+      handler.execute(cmd);
+    } else {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Unrecognized command "
+            + Arrays.asList(cmd.getArgs()));
+      }
+    }
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addCommandsOption(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH,
+        "hdfs scm -container <commands> <options>",
+        "where <commands> can be one of the following", options, "");
+  }
+
+  private static void addCommandsOption(Options options) {
+    Option createContainer =
+        new Option(CONTAINER_CREATE, false, "Create container");
+    Option infoContainer =
+        new Option(CONTAINER_INFO, false, "Info container");
+    Option deleteContainer =
+        new Option(CONTAINER_DELETE, false, "Delete container");
+    Option listContainer =
+        new Option(CONTAINER_LIST, false, "List container");
+    Option closeContainer =
+        new Option(CONTAINER_CLOSE, false, "Close container");
+
+    options.addOption(createContainer);
+    options.addOption(deleteContainer);
+    options.addOption(infoContainer);
+    options.addOption(listContainer);
+    options.addOption(closeContainer);
+    // Every new command should add its option here.
+  }
+
+  public static void addOptions(Options options) {
+    addCommandsOption(options);
+    // Add the options of each sub command handler.
+    CreateContainerHandler.addOptions(options);
+    DeleteContainerHandler.addOptions(options);
+    InfoContainerHandler.addOptions(options);
+    ListContainerHandler.addOptions(options);
+    CloseContainerHandler.addOptions(options);
+    // Every new command handler should add its options here.
+  }
+}
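
As a rough illustration of the dispatch path above, the following sketch
parses a hard-coded argument array and routes it through the handler. The
argument values are illustrative only, and a live ScmClient (e.g. the one
built by SCMCLI#getScmClient) is assumed:

    import org.apache.commons.cli.BasicParser;
    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.hadoop.hdds.scm.cli.container.ContainerCommandHandler;
    import org.apache.hadoop.hdds.scm.client.ScmClient;

    public class DispatchSketch {
      static void run(ScmClient scmClient) throws Exception {
        Options options = new Options();
        // Top-level -container flag plus all cascaded sub command options.
        options.addOption(new Option(ContainerCommandHandler.CONTAINER_CMD,
            false, "Container related options"));
        ContainerCommandHandler.addOptions(options);

        // Equivalent of: hdfs scm -container -create -c demo-container
        String[] argv = {"-container", "-create", "-c", "demo-container"};
        CommandLine cmd = new BasicParser().parse(options, argv);

        ContainerCommandHandler handler =
            new ContainerCommandHandler(scmClient);
        handler.setOut(System.out);
        handler.setErr(System.err);
        handler.execute(cmd); // routes to CreateContainerHandler
      }
    }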

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
new file mode 100644
index 0000000..2961831
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
+
+/**
+ * This is the handler that processes the container creation command.
+ */
+public class CreateContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_CREATE = "create";
+  public static final String OPT_CONTAINER_NAME = "c";
+  public static final String CONTAINER_OWNER = "OZONE";
+  // TODO Support an optional -p <pipelineID> option to create
+  // container on given datanodes.
+
+  public CreateContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_CREATE)) {
+      throw new IOException("Expecting container create");
+    }
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+
+    logOut("Creating container : %s.", containerName);
+    getScmClient().createContainer(containerName, CONTAINER_OWNER);
+    logOut("Container created.");
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -create <option>",
+        "where <option> is", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
+        true, "Specify container name");
+    options.addOption(containerNameOpt);
+  }
+}
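
The handler above ultimately reduces to a single ScmClient call. A minimal
programmatic sketch of the same operation (a configured client such as the
ContainerOperationClient built by SCMCLI#getScmClient is assumed; error
handling is trimmed):

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    final class CreateContainerSketch {
      static void createAndVerify(ScmClient client, String name)
          throws IOException {
        client.createContainer(name, "OZONE");         // same call as the handler
        Pipeline pipeline = client.getContainer(name); // non-null once created
        System.out.println("Leader: " + pipeline.getLeader().getHostName());
      }
    }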

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
new file mode 100644
index 0000000..a5b625a
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
+
+/**
+ * This is the handler that processes the delete container command.
+ */
+public class DeleteContainerHandler extends OzoneCommandHandler {
+
+  protected static final String CONTAINER_DELETE = "delete";
+  protected static final String OPT_FORCE = "f";
+  protected static final String OPT_CONTAINER_NAME = "c";
+
+  public DeleteContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    Preconditions.checkArgument(cmd.hasOption(CONTAINER_DELETE),
+        "Expecting command delete");
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    if (pipeline == null) {
+      throw new IOException("Cannot delete an non-exist container "
+          + containerName);
+    }
+
+    logOut("Deleting container : %s.", containerName);
+    getScmClient().deleteContainer(pipeline, cmd.hasOption(OPT_FORCE));
+    logOut("Container %s deleted.", containerName);
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -delete <option>",
+        "where <option> is", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option forceOpt = new Option(OPT_FORCE,
+        false,
+        "forcibly delete a container");
+    options.addOption(forceOpt);
+    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
+        true, "Specify container name");
+    options.addOption(containerNameOpt);
+  }
+}
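
The -f flag maps directly onto the boolean force argument of
ScmClient#deleteContainer. A short sketch under the same client assumptions
as above (whether a non-forced delete of a non-empty container is rejected
is decided server side, not in this patch):

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    final class DeleteContainerSketch {
      static void delete(ScmClient client, String name, boolean force)
          throws IOException {
        Pipeline pipeline = client.getContainer(name);
        if (pipeline == null) {
          throw new IOException("Cannot delete a non-existent container "
              + name);
        }
        client.deleteContainer(pipeline, force); // force == true mirrors -f
      }
    }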

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
new file mode 100644
index 0000000..c609915
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
+
+/**
+ * This is the handler that processes the container info command.
+ */
+public class InfoContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_INFO = "info";
+  protected static final String OPT_CONTAINER_NAME = "c";
+
+  /**
+   * Constructs a handler object.
+   *
+   * @param scmClient scm client.
+   */
+  public InfoContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_INFO)) {
+      throw new IOException("Expecting container info");
+    }
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
+
+    ContainerData containerData =
+        getScmClient().readContainer(pipeline);
+
+    // Print container report info.
+    logOut("Container Name: %s",
+        containerData.getName());
+    String openStatus =
+        containerData.getState() == HddsProtos.LifeCycleState.OPEN ? "OPEN" :
+            "CLOSED";
+    logOut("Container State: %s", openStatus);
+    if (!containerData.getHash().isEmpty()) {
+      logOut("Container Hash: %s", containerData.getHash());
+    }
+    logOut("Container DB Path: %s", containerData.getDbPath());
+    logOut("Container Path: %s", containerData.getContainerPath());
+
+    // Output metadata.
+    String metadataStr = containerData.getMetadataList().stream().map(
+        p -> p.getKey() + ":" + p.getValue())
+        .collect(Collectors.joining(", "));
+    logOut("Container Metadata: {%s}", metadataStr);
+
+    // Print pipeline of an existing container.
+    logOut("LeaderID: %s", pipeline.getLeader().getHostName());
+    String machinesStr = pipeline.getMachines().stream().map(
+        DatanodeDetails::getHostName).collect(Collectors.joining(","));
+    logOut("Datanodes: [%s]", machinesStr);
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -info <option>",
+        "where <option> is", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
+        true, "Specify container name");
+    options.addOption(containerNameOpt);
+  }
+}
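
Reading back the fields printed above goes through ScmClient#readContainer.
A sketch under the same client assumptions as before:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

    final class InfoContainerSketch {
      static String state(ScmClient client, String name) throws IOException {
        Pipeline pipeline = client.getContainer(name);
        ContainerData data = client.readContainer(pipeline);
        // Mirrors the handler: any state other than OPEN is reported CLOSED.
        return data.getState() == HddsProtos.LifeCycleState.OPEN
            ? "OPEN" : "CLOSED";
      }
    }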

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
new file mode 100644
index 0000000..0c7e790
--- /dev/null
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.ozone.web.utils.JsonUtils;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
+import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
+
+/**
+ * This is the handler that processes the container list command.
+ */
+public class ListContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_LIST = "list";
+  public static final String OPT_START_CONTAINER = "start";
+  public static final String OPT_PREFIX_CONTAINER = "prefix";
+  public static final String OPT_COUNT = "count";
+
+  /**
+   * Constructs a handler object.
+   *
+   * @param scmClient scm client
+   */
+  public ListContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_LIST)) {
+      throw new IOException("Expecting container list");
+    }
+    if (cmd.hasOption(HELP_OP)) {
+      displayHelp();
+      return;
+    }
+
+    if (!cmd.hasOption(OPT_COUNT)) {
+      // -help has already been handled above, so a missing count is an error.
+      displayHelp();
+      throw new IOException("Expecting container count");
+    }
+
+    String startName = cmd.getOptionValue(OPT_START_CONTAINER);
+    String prefixName = cmd.getOptionValue(OPT_PREFIX_CONTAINER);
+    int count = Integer.parseInt(cmd.getOptionValue(OPT_COUNT));
+    if (count < 0) {
+      displayHelp();
+      throw new IOException("-count should not be negative");
+    }
+
+    List<ContainerInfo> containerList =
+        getScmClient().listContainer(startName, prefixName, count);
+
+    // Output data list
+    for (ContainerInfo container : containerList) {
+      outputContainerPipeline(container.getPipeline());
+    }
+  }
+
+  private void outputContainerPipeline(Pipeline pipeline) throws IOException {
+    // Print container report info.
+    logOut("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter(
+        pipeline.toJsonString()));
+  }
+
+  @Override
+  public void displayHelp() {
+    Options options = new Options();
+    addOptions(options);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -list <option>",
+        "where <option> can be the following", options, "");
+  }
+
+  public static void addOptions(Options options) {
+    Option startContainerOpt = new Option(OPT_START_CONTAINER,
+        true, "Specify start container name");
+    Option prefixContainerOpt = new Option(OPT_PREFIX_CONTAINER,
+        true, "Specify prefix container name");
+    Option countOpt = new Option(OPT_COUNT, true,
+        "Specify count number, required");
+    options.addOption(countOpt);
+    options.addOption(startContainerOpt);
+    options.addOption(prefixContainerOpt);
+  }
+}
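
For completeness, the (start, prefix, count) triple handled above is passed
straight through to ScmClient#listContainer. A sketch that lists the first
count containers; passing null for start and prefix to mean "no filter" is
an assumption based on the handler forwarding unset option values:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

    final class ListContainerSketch {
      static void listFirst(ScmClient client, int count) throws IOException {
        // start == null, prefix == null: list from the beginning, unfiltered.
        List<ContainerInfo> containers = client.listContainer(null, null, count);
        for (ContainerInfo info : containers) {
          System.out.println(info.getPipeline().toJsonString());
        }
      }
    }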

