Repository: hadoop
Updated Branches:
  refs/heads/trunk a3a1552c3 -> 3a43ac285


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 04473d1..888b72e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.ozone.scm;
 
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -43,6 +45,8 @@ import org.junit.rules.Timeout;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
 
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
@@ -122,28 +126,29 @@ public class TestSCMCli {
 
   @Test
   public void testCreateContainer() throws Exception {
-    String containerName =  "containerTestCreate";
+    long containerID = ContainerTestHelper.getTestContainerID();
     try {
-      scm.getClientProtocolServer().getContainer(containerName);
+      scm.getClientProtocolServer().getContainer(containerID);
       fail("should not be able to get the container");
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().contains(
-          "Specified key does not exist. key : " + containerName));
+          "Specified key does not exist. key : " + containerID));
     }
-    String[] args = {"-container", "-create", "-c", containerName};
+    String[] args = {"-container", "-create", "-c",
+        Long.toString(containerID)};
     assertEquals(ResultCode.SUCCESS, cli.run(args));
-    Pipeline container = scm.getClientProtocolServer()
-        .getContainer(containerName);
+    ContainerInfo container = scm.getClientProtocolServer()
+        .getContainer(containerID);
     assertNotNull(container);
-    assertEquals(containerName, container.getContainerName());
+    assertEquals(containerID, container.containerID());
   }
 
-  private boolean containerExist(String containerName) {
+  private boolean containerExist(long containerID) {
     try {
-      Pipeline scmPipeline = scm.getClientProtocolServer()
-          .getContainer(containerName);
-      return scmPipeline != null
-          && containerName.equals(scmPipeline.getContainerName());
+      ContainerInfo container = scm.getClientProtocolServer()
+          .getContainer(containerID);
+      return container != null
+          && containerID == container.getContainerID();
     } catch (IOException e) {
       return false;
     }
@@ -162,29 +167,31 @@ public class TestSCMCli {
     // 1. Test to delete a non-empty container.
     // ****************************************
     // Create an non-empty container
-    containerName = "non-empty-container";
-    pipeline = containerOperationClient
+    ContainerInfo container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerName, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
 
     ContainerData cdata = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(pipeline), 
conf);
-    KeyUtils.getDB(cdata, conf).put(containerName.getBytes(),
+        .getFromProtBuf(containerOperationClient.readContainer(
+            container.getContainerID(), container.getPipeline()), conf);
+    KeyUtils.getDB(cdata, 
conf).put(Longs.toByteArray(container.getContainerID()),
         "someKey".getBytes());
-    Assert.assertTrue(containerExist(containerName));
+    Assert.assertTrue(containerExist(container.getContainerID()));
 
     // Gracefully delete a container should fail because it is open.
-    delCmd = new String[] {"-container", "-delete", "-c", containerName};
+    delCmd = new String[] {"-container", "-delete", "-c",
+        Long.toString(container.getContainerID())};
     testErr = new ByteArrayOutputStream();
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
     assertEquals(EXECUTION_ERROR, exitCode);
     assertTrue(testErr.toString()
         .contains("Deleting an open container is not allowed."));
-    Assert.assertTrue(containerExist(containerName));
+    Assert.assertTrue(containerExist(container.getContainerID()));
 
     // Close the container
-    containerOperationClient.closeContainer(pipeline);
+    containerOperationClient.closeContainer(
+        container.getContainerID(), container.getPipeline());
 
     // Gracefully delete a container should fail because it is not empty.
     testErr = new ByteArrayOutputStream();
@@ -192,42 +199,46 @@ public class TestSCMCli {
     assertEquals(EXECUTION_ERROR, exitCode2);
     assertTrue(testErr.toString()
         .contains("Container cannot be deleted because it is not empty."));
-    Assert.assertTrue(containerExist(containerName));
+    Assert.assertTrue(containerExist(container.getContainerID()));
 
     // Try force delete again.
-    delCmd = new String[] {"-container", "-delete", "-c", containerName, "-f"};
+    delCmd = new String[] {"-container", "-delete", "-c",
+        Long.toString(container.getContainerID()), "-f"};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals("Expected success, found:", ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(containerName));
+    assertFalse(containerExist(container.getContainerID()));
 
     // ****************************************
     // 2. Test to delete an empty container.
     // ****************************************
     // Create an empty container
-    containerName = "empty-container";
-    pipeline = containerOperationClient
+    ContainerInfo emptyContainer = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerName, containerOwner);
-    containerOperationClient.closeContainer(pipeline);
-    Assert.assertTrue(containerExist(containerName));
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    containerOperationClient.closeContainer(container.getContainerID(),
+        container.getPipeline());
+    Assert.assertTrue(containerExist(container.getContainerID()));
 
     // Successfully delete an empty container.
-    delCmd = new String[] {"-container", "-delete", "-c", containerName};
+    delCmd = new String[] {"-container", "-delete", "-c",
+        Long.toString(emptyContainer.getContainerID())};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(containerName));
+    assertFalse(containerExist(emptyContainer.getContainerID()));
 
     // After the container is deleted,
-    // a same name container can now be recreated.
-    containerOperationClient.createContainer(xceiverClientManager.getType(),
-        HddsProtos.ReplicationFactor.ONE, containerName, containerOwner);
-    Assert.assertTrue(containerExist(containerName));
+    // another container can now be recreated.
+    ContainerInfo newContainer = containerOperationClient.
+        createContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    Assert.assertTrue(containerExist(newContainer.getContainerID()));
 
     // ****************************************
     // 3. Test to delete a non-existent container.
     // ****************************************
-    containerName = "non-exist-container";
-    delCmd = new String[] {"-container", "-delete", "-c", containerName};
+    long nonExistContainerID =  ContainerTestHelper.getTestContainerID();
+    delCmd = new String[] {"-container", "-delete", "-c",
+        Long.toString(nonExistContainerID)};
     testErr = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
     assertEquals(EXECUTION_ERROR, exitCode);
@@ -267,12 +278,13 @@ public class TestSCMCli {
         EXECUTION_ERROR, exitCode);
 
     // Create an empty container.
-    cname = "ContainerTestInfo1";
-    Pipeline pipeline = containerOperationClient
+    ContainerInfo container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, cname, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
     ContainerData data = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(pipeline), 
conf);
+        .getFromProtBuf(containerOperationClient.
+            readContainer(container.getContainerID(),
+                container.getPipeline()), conf);
 
     info = new String[]{"-container", "-info", "-c", cname};
     ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -289,12 +301,12 @@ public class TestSCMCli {
     out.reset();
 
     // Create an non-empty container
-    cname = "ContainerTestInfo2";
-    pipeline = containerOperationClient
+    container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, cname, containerOwner);
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
     data = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(pipeline), 
conf);
+        .getFromProtBuf(containerOperationClient.readContainer(
+            container.getContainerID(), container.getPipeline()), conf);
     KeyUtils.getDB(data, conf).put(cname.getBytes(), "someKey".getBytes());
 
     info = new String[]{"-container", "-info", "-c", cname};
@@ -311,13 +323,15 @@ public class TestSCMCli {
 
 
     // Close last container and test info again.
-    containerOperationClient.closeContainer(pipeline);
+    containerOperationClient.closeContainer(
+        container.getContainerID(), container.getPipeline());
 
     info = new String[] {"-container", "-info", "-c", cname};
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
     data = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(pipeline), 
conf);
+        .getFromProtBuf(containerOperationClient.readContainer(
+            container.getContainerID(), container.getPipeline()), conf);
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String.format(formatStrWithHash, cname, openStatus,
@@ -347,11 +361,12 @@ public class TestSCMCli {
   @Test
   public void testListContainerCommand() throws Exception {
     // Create 20 containers for testing.
-    String prefix = "ContainerForTesting";
+    List<ContainerInfo> containers = new ArrayList<>();
     for (int index = 0; index < 20; index++) {
-      String containerName = String.format("%s%02d", prefix, index);
-      containerOperationClient.createContainer(xceiverClientManager.getType(),
-          HddsProtos.ReplicationFactor.ONE, containerName, containerOwner);
+      ContainerInfo container = containerOperationClient.createContainer(
+          xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
+          containerOwner);
+      containers.add(container);
     }
 
     ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -367,9 +382,11 @@ public class TestSCMCli {
     out.reset();
     err.reset();
 
+    long startContainerID = containers.get(0).getContainerID();
+    String startContainerIDStr = Long.toString(startContainerID);
     // Test with -start and -count, the value of -count is negative.
     args = new String[] {"-container", "-list",
-        "-start", prefix + 0, "-count", "-1"};
+        "-start", startContainerIDStr, "-count", "-1"};
     exitCode = runCommandAndGetOutput(args, out, err);
     assertEquals(EXECUTION_ERROR, exitCode);
     assertTrue(err.toString()
@@ -378,67 +395,23 @@ public class TestSCMCli {
     out.reset();
     err.reset();
 
-    String startName = String.format("%s%02d", prefix, 0);
-
     // Test with -start and -count.
     args = new String[] {"-container", "-list", "-start",
-        startName, "-count", "10"};
-    exitCode = runCommandAndGetOutput(args, out, err);
-    assertEquals(ResultCode.SUCCESS, exitCode);
-    for (int index = 0; index < 10; index++) {
-      String containerName = String.format("%s%02d", prefix, index);
-      assertTrue(out.toString().contains(containerName));
-    }
-
-    out.reset();
-    err.reset();
-
-    // Test with -start, -prefix and -count.
-    startName = String.format("%s%02d", prefix, 0);
-    String prefixName = String.format("%s0", prefix);
-    args = new String[] {"-container", "-list", "-start",
-        startName, "-prefix", prefixName, "-count", "20"};
-    exitCode = runCommandAndGetOutput(args, out, err);
-    assertEquals(ResultCode.SUCCESS, exitCode);
-    for (int index = 0; index < 10; index++) {
-      String containerName = String.format("%s%02d", prefix, index);
-      assertTrue(out.toString().contains(containerName));
-    }
-
-    out.reset();
-    err.reset();
-
-    startName = String.format("%s%02d", prefix, 0);
-    prefixName = String.format("%s0", prefix);
-    args = new String[] {"-container", "-list", "-start",
-        startName, "-prefix", prefixName, "-count", "4"};
+        startContainerIDStr, "-count", "10"};
     exitCode = runCommandAndGetOutput(args, out, err);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    for (int index = 0; index < 4; index++) {
-      String containerName = String.format("%s%02d", prefix, index);
-      assertTrue(out.toString().contains(containerName));
+    for (int index = 1; index < 10; index++) {
+      String containerID = Long.toString(
+          containers.get(index).getContainerID());
+      assertTrue(out.toString().contains(containerID));
     }
 
     out.reset();
     err.reset();
 
-    prefixName = String.format("%s0", prefix);
-    args = new String[] {"-container", "-list",
-        "-prefix", prefixName, "-count", "6"};
-    exitCode = runCommandAndGetOutput(args, out, err);
-    assertEquals(ResultCode.SUCCESS, exitCode);
-    for (int index = 0; index < 6; index++) {
-      String containerName = String.format("%s%02d", prefix, index);
-      assertTrue(out.toString().contains(containerName));
-    }
-
-    out.reset();
-    err.reset();
-
-    // Test with -start and -prefix, while -count doesn't exist.
-    prefixName = String.format("%s%02d", prefix, 20);
+    // Test with -start, while -count is not specified.
     args = new String[] {"-container", "-list", "-start",
-        startName, "-prefix", prefixName, "-count", "10"};
+        startContainerIDStr};
     exitCode = runCommandAndGetOutput(args, out, err);
     assertEquals(ResultCode.SUCCESS, exitCode);
     assertTrue(out.toString().isEmpty());
@@ -446,21 +419,23 @@ public class TestSCMCli {
 
   @Test
   public void testCloseContainer() throws Exception {
-    String containerName =  "containerTestClose";
-    String[] args = {"-container", "-create", "-c", containerName};
+    long containerID = ContainerTestHelper.getTestContainerID();
+    String[] args = {"-container", "-create", "-c",
+        Long.toString(containerID)};
     assertEquals(ResultCode.SUCCESS, cli.run(args));
-    Pipeline container = scm.getClientProtocolServer()
-        .getContainer(containerName);
+    ContainerInfo container = scm.getClientProtocolServer()
+        .getContainer(containerID);
     assertNotNull(container);
-    assertEquals(containerName, container.getContainerName());
+    assertEquals(containerID, container.getContainerID());
 
-    ContainerInfo containerInfo = scm.getContainerInfo(containerName);
+    ContainerInfo containerInfo = scm.getContainerInfo(containerID);
     assertEquals(OPEN, containerInfo.getState());
 
-    String[] args1 = {"-container", "-close", "-c", containerName};
+    String[] args1 = {"-container", "-close", "-c",
+        Long.toString(containerID)};
     assertEquals(ResultCode.SUCCESS, cli.run(args1));
 
-    containerInfo = scm.getContainerInfo(containerName);
+    containerInfo = scm.getContainerInfo(containerID);
     assertEquals(CLOSED, containerInfo.getState());
 
     // closing this container again will trigger an error.
@@ -502,9 +477,7 @@ public class TestSCMCli {
     String[] args2 = {"-container", "-create", "-help"};
     assertEquals(ResultCode.SUCCESS, cli.run(args2));
     String expected2 =
-        "usage: hdfs scm -container -create <option>\n" +
-        "where <option> is\n" +
-        " -c <arg>   Specify container name\n";
+        "usage: hdfs scm -container -create\n";
     assertEquals(expected2, testContent.toString());
     testContent.reset();
 
@@ -513,7 +486,7 @@ public class TestSCMCli {
     String expected3 =
         "usage: hdfs scm -container -delete <option>\n" +
         "where <option> is\n" +
-        " -c <arg>   Specify container name\n" +
+        " -c <arg>   Specify container id\n" +
         " -f         forcibly delete a container\n";
     assertEquals(expected3, testContent.toString());
     testContent.reset();
@@ -523,7 +496,7 @@ public class TestSCMCli {
     String expected4 =
         "usage: hdfs scm -container -info <option>\n" +
         "where <option> is\n" +
-        " -c <arg>   Specify container name\n";
+        " -c <arg>   Specify container id\n";
     assertEquals(expected4, testContent.toString());
     testContent.reset();
 
@@ -532,9 +505,8 @@ public class TestSCMCli {
     String expected5 =
         "usage: hdfs scm -container -list <option>\n" +
             "where <option> can be the following\n" +
-            " -count <arg>    Specify count number, required\n" +
-            " -prefix <arg>   Specify prefix container name\n" +
-            " -start <arg>    Specify start container name\n";
+            " -start <arg>    Specify start container id, required\n" +
+            " -count <arg>   Specify count number name\n";
     assertEquals(expected5, testContent.toString());
     testContent.reset();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
index 332e679..1d19bb3 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
@@ -22,9 +22,8 @@ import static 
org.apache.hadoop.test.MetricsAsserts.getLongGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
-import java.util.UUID;
-
 import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.TestUtils;
@@ -225,7 +224,7 @@ public class TestSCMMetrics {
 
     for (int i = 0; i < numReport; i++) {
       ContainerReport report = new ContainerReport(
-          UUID.randomUUID().toString(), DigestUtils.sha256Hex("Simulated"));
+          RandomUtils.nextLong(), DigestUtils.sha256Hex("Simulated"));
       report.setSize(stat.getSize().get());
       report.setBytesUsed(stat.getUsed().get());
       report.setReadCount(stat.getReadCount().get());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 85403a2..07ad6ef 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.ozone.scm;
 
 import com.google.common.cache.Cache;
 import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
@@ -78,29 +78,24 @@ public class TestXceiverClientManager {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    String containerName1 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline1 = storageContainerLocationClient
+    ContainerInfo container1 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerName1, containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(pipeline1);
+            containerOwner);
+    XceiverClientSpi client1 = 
clientManager.acquireClient(container1.getPipeline(),
+        container1.getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertEquals(containerName1,
-        client1.getPipeline().getContainerName());
 
-    String containerName2 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline2 = storageContainerLocationClient
+    ContainerInfo container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerName2, containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(pipeline2);
+            containerOwner);
+    XceiverClientSpi client2 = 
clientManager.acquireClient(container2.getPipeline(),
+        container2.getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertEquals(containerName2,
-        client2.getPipeline().getContainerName());
 
-    XceiverClientSpi client3 = clientManager.acquireClient(pipeline1);
+    XceiverClientSpi client3 = 
clientManager.acquireClient(container1.getPipeline(),
+        container1.getContainerID());
     Assert.assertEquals(2, client3.getRefcount());
     Assert.assertEquals(2, client1.getRefcount());
-    Assert.assertEquals(containerName1,
-        client3.getPipeline().getContainerName());
     Assert.assertEquals(client1, client3);
     clientManager.releaseClient(client1);
     clientManager.releaseClient(client2);
@@ -112,43 +107,43 @@ public class TestXceiverClientManager {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1);
     XceiverClientManager clientManager = new XceiverClientManager(conf);
-    Cache<String, XceiverClientSpi> cache =
+    Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();
 
-    String containerName1 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline1 =
+    ContainerInfo container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(), HddsProtos.ReplicationFactor.ONE,
-            containerName1, containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(pipeline1);
+            containerOwner);
+    XceiverClientSpi client1 = 
clientManager.acquireClient(container1.getPipeline(),
+        container1.getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertEquals(containerName1,
-        client1.getPipeline().getContainerName());
+    Assert.assertEquals(container1.getPipeline(),
+        client1.getPipeline());
 
-    String containerName2 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline2 =
+    ContainerInfo container2 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerName2, containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(pipeline2);
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    XceiverClientSpi client2 = 
clientManager.acquireClient(container2.getPipeline(),
+        container2.getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertEquals(containerName2,
-        client2.getPipeline().getContainerName());
     Assert.assertNotEquals(client1, client2);
 
     // least recent container (i.e. container1) is evicted
-    XceiverClientSpi nonExistent1 = cache.getIfPresent(containerName1);
+    XceiverClientSpi nonExistent1 = 
cache.getIfPresent(container1.getContainerID());
     Assert.assertEquals(null, nonExistent1);
     // However container call should succeed because of refcount on the client.
     String traceID1 = "trace" + RandomStringUtils.randomNumeric(4);
-    ContainerProtocolCalls.createContainer(client1,  traceID1);
+    ContainerProtocolCalls.createContainer(client1,
+        container1.getContainerID(),  traceID1);
 
     // After releasing the client, this connection should be closed
     // and any container operations should fail
     clientManager.releaseClient(client1);
     exception.expect(IOException.class);
     exception.expectMessage("This channel is not connected.");
-    ContainerProtocolCalls.createContainer(client1,  traceID1);
+    ContainerProtocolCalls.createContainer(client1,
+        container1.getContainerID(), traceID1);
     clientManager.releaseClient(client2);
   }
 
@@ -157,42 +152,39 @@ public class TestXceiverClientManager {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, 1);
     XceiverClientManager clientManager = new XceiverClientManager(conf);
-    Cache<String, XceiverClientSpi> cache =
+    Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();
 
-    String containerName1 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline1 =
+    ContainerInfo container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
-            clientManager.getFactor(), containerName1, containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(pipeline1);
+            clientManager.getFactor(), containerOwner);
+    XceiverClientSpi client1 = 
clientManager.acquireClient(container1.getPipeline(),
+        container1.getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertEquals(containerName1,
-        client1.getPipeline().getContainerName());
 
     clientManager.releaseClient(client1);
     Assert.assertEquals(0, client1.getRefcount());
 
-    String containerName2 = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline2 = storageContainerLocationClient
+    ContainerInfo container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerName2, containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(pipeline2);
+            containerOwner);
+    XceiverClientSpi client2 = 
clientManager.acquireClient(container2.getPipeline(),
+        container2.getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertEquals(containerName2,
-        client2.getPipeline().getContainerName());
     Assert.assertNotEquals(client1, client2);
 
 
     // now client 1 should be evicted
-    XceiverClientSpi nonExistent = cache.getIfPresent(containerName1);
+    XceiverClientSpi nonExistent = 
cache.getIfPresent(container1.getContainerID());
     Assert.assertEquals(null, nonExistent);
 
     // Any container operation should now fail
     String traceID2 = "trace" + RandomStringUtils.randomNumeric(4);
     exception.expect(IOException.class);
     exception.expectMessage("This channel is not connected.");
-    ContainerProtocolCalls.createContainer(client1, traceID2);
+    ContainerProtocolCalls.createContainer(client1,
+        container1.getContainerID(), traceID2);
     clientManager.releaseClient(client2);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index 1403f89..99742c2 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -27,9 +27,11 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -78,14 +81,15 @@ public class TestXceiverClientMetrics {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    String containerName = "container" + RandomStringUtils.randomNumeric(10);
-    Pipeline pipeline = storageContainerLocationClient
+    ContainerInfo container = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
-            containerName, containerOwner);
-    XceiverClientSpi client = clientManager.acquireClient(pipeline);
+            containerOwner);
+    XceiverClientSpi client = clientManager.acquireClient(
+        container.getPipeline(), container.getContainerID());
 
     ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(containerName, pipeline);
+        .getCreateContainerRequest(container.getContainerID(),
+            container.getPipeline());
     client.sendCommand(request);
 
     MetricsRecordBuilder containerMetrics = getMetrics(
@@ -109,11 +113,12 @@ public class TestXceiverClientMetrics {
         try {
           // use async interface for testing pending metrics
           for (int i = 0; i < numRequest; i++) {
-            String keyName = OzoneUtils.getRequestID();
+            BlockID blockID = ContainerTestHelper.
+                getTestBlockID(container.getContainerID());
             ContainerProtos.ContainerCommandRequestProto smallFileRequest;
 
             smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(
-                client.getPipeline(), containerName, keyName, 1024);
+                client.getPipeline(), blockID, 1024);
             CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
                 response = client.sendCommandAsync(smallFileRequest);
             computeResults.add(response);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index ae30fb3..b621a08 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -623,12 +623,11 @@ public class TestKeys {
         List<KsmKeyLocationInfo> locations =
             keyInfo.getLatestVersionLocations().getLocationList();
         for (KsmKeyLocationInfo location : locations) {
-          String containerName = location.getContainerName();
-          KeyData keyData = new KeyData(containerName, location.getBlockID());
+          KeyData keyData = new KeyData(location.getBlockID());
           KeyData blockInfo = cm.getContainerManager()
               .getKeyManager().getKey(keyData);
           ContainerData containerData = cm.getContainerManager()
-              .readContainer(containerName);
+              .readContainer(keyData.getContainerID());
           File dataDir = ContainerUtils
               .getDataDirectory(containerData).toFile();
           for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
index fa0eaa2..13cc40b 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
@@ -21,11 +21,11 @@ import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -480,8 +480,8 @@ public class KSMMetadataManagerImpl implements 
KSMMetadataManager {
       if (latest == null) {
         return Collections.emptyList();
       }
-      List<String> item = latest.getLocationList().stream()
-          .map(KsmKeyLocationInfo::getBlockID)
+      List<BlockID> item = latest.getLocationList().stream()
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
           .collect(Collectors.toList());
       BlockGroup keyBlocks = BlockGroup.newBuilder()
           .setKeyName(DFSUtil.bytes2String(entry.getKey()))
@@ -510,9 +510,9 @@ public class KSMMetadataManagerImpl implements 
KSMMetadataManager {
         continue;
       }
       // Get block keys as a list.
-      List<String> item = info.getLatestVersionLocations()
+      List<BlockID> item = info.getLatestVersionLocations()
           .getBlocksLatestVersionOnly().stream()
-          .map(KsmKeyLocationInfo::getBlockID)
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
           .collect(Collectors.toList());
       BlockGroup keyBlocks = BlockGroup.newBuilder()
           .setKeyName(DFSUtil.bytes2String(entry.getKey()))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
index 14fb69c..e51ab28 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.BackgroundTask;
@@ -117,7 +118,7 @@ public class KeyDeletingService extends BackgroundService {
               LOG.warn("Key {} deletion failed because some of the blocks"
                   + " were failed to delete, failed blocks: {}",
                   result.getObjectKey(),
-                  String.join(",", result.getFailedBlocks()));
+                  StringUtils.join(",", result.getFailedBlocks()));
             }
           }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
index 70ba178..8ee7d25 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
@@ -203,8 +203,7 @@ public class KeyManagerImpl implements KeyManager {
       KsmKeyInfo keyInfo =
           KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
       KsmKeyLocationInfo info = new KsmKeyLocationInfo.Builder()
-          .setContainerName(allocatedBlock.getPipeline().getContainerName())
-          .setBlockID(allocatedBlock.getKey())
+          .setBlockID(allocatedBlock.getBlockID())
           .setShouldCreateContainer(allocatedBlock.getCreateContainer())
           .setLength(scmBlockSize)
           .setOffset(0)
@@ -256,8 +255,7 @@ public class KeyManagerImpl implements KeyManager {
         AllocatedBlock allocatedBlock =
             scmBlockClient.allocateBlock(allocateSize, type, factor, ksmId);
         KsmKeyLocationInfo subKeyInfo = new KsmKeyLocationInfo.Builder()
-            .setContainerName(allocatedBlock.getPipeline().getContainerName())
-            .setBlockID(allocatedBlock.getKey())
+            .setBlockID(allocatedBlock.getBlockID())
             .setShouldCreateContainer(allocatedBlock.getCreateContainer())
             .setLength(allocateSize)
             .setOffset(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index 76312e7..120eb06 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -715,7 +715,7 @@ public final class KeySpaceManager extends 
ServiceRuntimeInfoImpl
   }
 
   @Override
-  public KsmKeyLocationInfo  allocateBlock(KsmKeyArgs args, int clientID)
+  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
       throws IOException {
     try {
       metrics.incNumBlockAllocateCalls();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
index 7a2d7cc..8e2540a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.ksm;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.utils.BackgroundService;
 import org.apache.hadoop.utils.BackgroundTask;
 import org.apache.hadoop.utils.BackgroundTaskQueue;
@@ -97,7 +98,7 @@ public class OpenKeyCleanupService extends BackgroundService {
               LOG.warn("Deleting open Key {} failed because some of the blocks"
                       + " were failed to delete, failed blocks: {}",
                   result.getObjectKey(),
-                  String.join(",", result.getFailedBlocks()));
+                  StringUtils.join(",", result.getFailedBlocks()));
             }
           }
           LOG.info("Found {} expired open key entries, successfully " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 70d80d5..e3f6cc9 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -60,7 +60,7 @@ public class BenchMarkContainerStateMap {
     for (int x = 1; x < 1000; x++) {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setContainerName(pipeline.getContainerName()).setState(CLOSED)
+            .setState(CLOSED)
             .setPipeline(pipeline)
             // This is bytes allocated for blocks inside container, not the
             // container size
@@ -76,7 +76,7 @@ public class BenchMarkContainerStateMap {
     for (int y = currentCount; y < 2000; y++) {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setContainerName(pipeline.getContainerName()).setState(OPEN)
+            .setState(OPEN)
             .setPipeline(pipeline)
             // This is bytes allocated for blocks inside container, not the
             // container size
@@ -91,7 +91,7 @@ public class BenchMarkContainerStateMap {
     }
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName()).setState(OPEN)
+          .setState(OPEN)
           .setPipeline(pipeline)
           // This is bytes allocated for blocks inside container, not the
           // container size
@@ -142,7 +142,7 @@ public class BenchMarkContainerStateMap {
     for (; i.hasNext();) {
       pipelineChannel.addMember(i.next());
     }
-    return new Pipeline(containerName, pipelineChannel);
+    return new Pipeline(pipelineChannel);
   }
 
   @Benchmark
@@ -151,7 +151,7 @@ public class BenchMarkContainerStateMap {
     Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString());
     int cid = state.containerID.incrementAndGet();
     ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName()).setState(CLOSED)
+        .setState(CLOSED)
         .setPipeline(pipeline)
         // This is bytes allocated for blocks inside container, not the
         // container size

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 468fee5..b73f108 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -21,7 +21,9 @@ import com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
@@ -32,6 +34,7 @@ import 
org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
+import org.apache.hadoop.util.Time;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Scope;
@@ -41,6 +44,7 @@ import org.openjdk.jmh.annotations.TearDown;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
@@ -75,6 +79,14 @@ public class BenchMarkDatanodeDispatcher {
   private AtomicInteger keyCount;
   private AtomicInteger chunkCount;
 
+  final int initContainers = 100;
+  final int initKeys = 50;
+  final int initChunks = 100;
+
+  List<Long> containers;
+  List<Long> keys;
+  List<String> chunks;
+
   @Setup(Level.Trial)
   public void initialize() throws IOException {
     datanodeUuid = UUID.randomUUID().toString();
@@ -110,20 +122,39 @@ public class BenchMarkDatanodeDispatcher {
     keyCount = new AtomicInteger();
     chunkCount = new AtomicInteger();
 
+    containers = new ArrayList<>();
+    keys = new ArrayList<>();
+    chunks = new ArrayList<>();
+
     // Create containers
-    for (int x = 0; x < 100; x++) {
-      String containerName = "container-" + containerCount.getAndIncrement();
-      dispatcher.dispatch(getCreateContainerCommand(containerName));
+    for (int x = 0; x < initContainers; x++) {
+      long containerID = Time.getUtcTime() + x;
+      ContainerCommandRequestProto req = 
getCreateContainerCommand(containerID);
+      dispatcher.dispatch(req);
+      containers.add(containerID);
+      containerCount.getAndIncrement();
     }
+
+    for (int x = 0; x < initKeys; x++) {
+      keys.add(Time.getUtcTime()+x);
+    }
+
+    for (int x = 0; x < initChunks; x++) {
+      chunks.add("chunk-" + x);
+    }
+
     // Add chunk and keys to the containers
-    for (int x = 0; x < 50; x++) {
-      String chunkName = "chunk-" + chunkCount.getAndIncrement();
-      String keyName = "key-" + keyCount.getAndIncrement();
-      for (int y = 0; y < 100; y++) {
-        String containerName = "container-" + y;
-        dispatcher.dispatch(getWriteChunkCommand(containerName, chunkName));
+    for (int x = 0; x < initKeys; x++) {
+      String chunkName = chunks.get(x);
+      chunkCount.getAndIncrement();
+      long key = keys.get(x);
+      keyCount.getAndIncrement();
+      for (int y = 0; y < initContainers; y++) {
+        long containerID = containers.get(y);
+        BlockID  blockID = new BlockID(containerID, key);
         dispatcher
-            .dispatch(getPutKeyCommand(containerName, chunkName, keyName));
+            .dispatch(getPutKeyCommand(blockID, chunkName));
+        dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName));
       }
     }
   }
@@ -134,147 +165,166 @@ public class BenchMarkDatanodeDispatcher {
     FileUtils.deleteDirectory(new File(baseDir));
   }
 
-  private ContainerCommandRequestProto getCreateContainerCommand(
-      String containerName) {
+  private ContainerCommandRequestProto getCreateContainerCommand(long 
containerID) {
     CreateContainerRequestProto.Builder createRequest =
         CreateContainerRequestProto.newBuilder();
     createRequest.setPipeline(
-        new Pipeline(containerName, pipelineChannel).getProtobufMessage());
+        new Pipeline(pipelineChannel).getProtobufMessage());
     createRequest.setContainerData(
-        ContainerData.newBuilder().setName(containerName).build());
+        ContainerData.newBuilder().setContainerID(
+            containerID).build());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
     request.setCreateContainer(createRequest);
     request.setDatanodeUuid(datanodeUuid);
-    request.setTraceID(containerName + "-trace");
+    request.setTraceID(containerID + "-trace");
     return request.build();
   }
 
   private ContainerCommandRequestProto getWriteChunkCommand(
-      String containerName, String key) {
-
+      BlockID blockID, String chunkName) {
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
         .newBuilder()
-        .setPipeline(
-            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
-        .setKeyName(key)
-        .setChunkData(getChunkInfo(containerName, key))
+        .setBlockID(blockID.getProtobuf())
+        .setChunkData(getChunkInfo(blockID, chunkName))
         .setData(data);
 
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.WriteChunk)
-        .setTraceID(containerName + "-" + key +"-trace")
+        .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setWriteChunk(writeChunkRequest);
     return request.build();
   }
 
   private ContainerCommandRequestProto getReadChunkCommand(
-      String containerName, String key) {
+      BlockID blockID, String chunkName) {
     ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
         .newBuilder()
-        .setPipeline(
-            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
-        .setKeyName(key)
-        .setChunkData(getChunkInfo(containerName, key));
+        .setBlockID(blockID.getProtobuf())
+        .setChunkData(getChunkInfo(blockID, chunkName));
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.ReadChunk)
-        .setTraceID(containerName + "-" + key +"-trace")
+        .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setReadChunk(readChunkRequest);
     return request.build();
   }
 
   private ContainerProtos.ChunkInfo getChunkInfo(
-      String containerName, String key) {
+      BlockID blockID, String chunkName) {
     ContainerProtos.ChunkInfo.Builder builder =
         ContainerProtos.ChunkInfo.newBuilder()
             .setChunkName(
-                DigestUtils.md5Hex(key) + "_stream_" + containerName + 
"_chunk_"
-                    + key)
+                DigestUtils.md5Hex(chunkName)
+                    + "_stream_" + blockID.getContainerID() + "_block_"
+                    + blockID.getLocalID())
             .setOffset(0).setLen(data.size());
     return builder.build();
   }
 
   private ContainerCommandRequestProto getPutKeyCommand(
-      String containerName, String chunkKey, String key) {
+      BlockID blockID, String chunkKey) {
     PutKeyRequestProto.Builder putKeyRequest = PutKeyRequestProto
         .newBuilder()
-        .setPipeline(
-            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
-        .setKeyData(getKeyData(containerName, chunkKey, key));
+        .setKeyData(getKeyData(blockID, chunkKey));
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.PutKey)
-        .setTraceID(containerName + "-" + key +"-trace")
+        .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setPutKey(putKeyRequest);
     return request.build();
   }
 
   private ContainerCommandRequestProto getGetKeyCommand(
-      String containerName, String chunkKey, String key) {
+      BlockID blockID, String chunkKey) {
     GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto.newBuilder()
-        .setPipeline(
-            new Pipeline(containerName, pipelineChannel).getProtobufMessage())
-        .setKeyData(getKeyData(containerName, chunkKey, key));
+        .setKeyData(getKeyData(blockID, chunkKey));
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(ContainerProtos.Type.GetKey)
-        .setTraceID(containerName + "-" + key +"-trace")
+        .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setGetKey(readKeyRequest);
     return request.build();
   }
 
   private ContainerProtos.KeyData getKeyData(
-      String containerName, String chunkKey, String key) {
+      BlockID blockID, String chunkKey) {
     ContainerProtos.KeyData.Builder builder =  ContainerProtos.KeyData
         .newBuilder()
-        .setContainerName(containerName)
-        .setName(key)
-        .addChunks(getChunkInfo(containerName, chunkKey));
+        .setBlockID(blockID.getProtobuf())
+        .addChunks(getChunkInfo(blockID, chunkKey));
     return builder.build();
   }
 
   @Benchmark
   public void createContainer(BenchMarkDatanodeDispatcher bmdd) {
-    bmdd.dispatcher.dispatch(getCreateContainerCommand(
-        "container-" + containerCount.getAndIncrement()));
+    long containerID = RandomUtils.nextLong();
+    ContainerCommandRequestProto req = getCreateContainerCommand(containerID);
+    bmdd.dispatcher.dispatch(req);
+    bmdd.containers.add(containerID);
+    bmdd.containerCount.getAndIncrement();
   }
 
 
   @Benchmark
   public void writeChunk(BenchMarkDatanodeDispatcher bmdd) {
-    String containerName = "container-" + random.nextInt(containerCount.get());
     bmdd.dispatcher.dispatch(getWriteChunkCommand(
-        containerName, "chunk-" + chunkCount.getAndIncrement()));
+        getRandomBlockID(), getNewChunkToWrite()));
   }
 
   @Benchmark
   public void readChunk(BenchMarkDatanodeDispatcher bmdd) {
-    String containerName = "container-" + random.nextInt(containerCount.get());
-    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
-    bmdd.dispatcher.dispatch(getReadChunkCommand(containerName, chunkKey));
+    BlockID blockID = getRandomBlockID();
+    String chunkKey = getRandomChunkToRead();
+    bmdd.dispatcher.dispatch(getReadChunkCommand(blockID, chunkKey));
   }
 
   @Benchmark
   public void putKey(BenchMarkDatanodeDispatcher bmdd) {
-    String containerName = "container-" + random.nextInt(containerCount.get());
-    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
-    bmdd.dispatcher.dispatch(getPutKeyCommand(
-        containerName, chunkKey, "key-" + keyCount.getAndIncrement()));
+    BlockID blockID = getRandomBlockID();
+    String chunkKey = getNewChunkToWrite();
+    bmdd.dispatcher.dispatch(getPutKeyCommand(blockID, chunkKey));
   }
 
   @Benchmark
   public void getKey(BenchMarkDatanodeDispatcher bmdd) {
-    String containerName = "container-" + random.nextInt(containerCount.get());
-    String chunkKey = "chunk-" + random.nextInt(chunkCount.get());
-    String key = "key-" + random.nextInt(keyCount.get());
-    bmdd.dispatcher.dispatch(getGetKeyCommand(containerName, chunkKey, key));
+    BlockID blockID = getRandomBlockID();
+    String chunkKey = getNewChunkToWrite();
+    bmdd.dispatcher.dispatch(getGetKeyCommand(blockID, chunkKey));
+  }
+
+  // Chunks writes from benchmark only reaches certain containers
+  // Use initChunks instead of updated counters to guarantee
+  // key/chunks are readable.
+
+  private BlockID getRandomBlockID() {
+    return new BlockID(getRandomContainerID(), getRandomKeyID());
+  }
+
+  private long getRandomContainerID() {
+    return containers.get(random.nextInt(initContainers));
+  }
+
+  private long getRandomKeyID() {
+    return keys.get(random.nextInt(initKeys));
+  }
+
+  private String getRandomChunkToRead() {
+    return chunks.get(random.nextInt(initChunks));
+  }
+
+  private String getNewChunkToWrite() {
+    return "chunk-" + chunkCount.getAndIncrement();
+  }
+
+  private String getBlockTraceID(BlockID blockID) {
+    return blockID.getContainerID() + "-" + blockID.getLocalID() +"-trace";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
index 0890e4b..c4c6f9e 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
@@ -72,21 +72,21 @@ public class BenchMarkRocksDbStore {
         .toFile();
     opts.setCreateIfMissing(true);
     opts.setWriteBufferSize(
-        (long) StorageUnit.MB.toBytes(Long.valueOf(writeBufferSize)));
-    opts.setMaxWriteBufferNumber(Integer.valueOf(maxWriteBufferNumber));
-    opts.setMaxBackgroundFlushes(Integer.valueOf(maxBackgroundFlushes));
+        (long) StorageUnit.MB.toBytes(Long.parseLong(writeBufferSize)));
+    opts.setMaxWriteBufferNumber(Integer.parseInt(maxWriteBufferNumber));
+    opts.setMaxBackgroundFlushes(Integer.parseInt(maxBackgroundFlushes));
     BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
     tableConfig.setBlockSize(
-        (long) StorageUnit.KB.toBytes(Long.valueOf(blockSize)));
-    opts.setMaxOpenFiles(Integer.valueOf(maxOpenFiles));
+        (long) StorageUnit.KB.toBytes(Long.parseLong(blockSize)));
+    opts.setMaxOpenFiles(Integer.parseInt(maxOpenFiles));
     opts.setMaxBytesForLevelBase(
-        (long) StorageUnit.MB.toBytes(Long.valueOf(maxBytesForLevelBase)));
+        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase)));
     opts.setCompactionStyle(CompactionStyle.UNIVERSAL);
     opts.setLevel0FileNumCompactionTrigger(10);
     opts.setLevel0SlowdownWritesTrigger(20);
     opts.setLevel0StopWritesTrigger(40);
     opts.setTargetFileSizeBase(
-        (long) StorageUnit.MB.toBytes(Long.valueOf(maxBytesForLevelBase)) / 
10);
+        (long) StorageUnit.MB.toBytes(Long.parseLong(maxBytesForLevelBase)) / 
10);
     opts.setMaxBackgroundCompactions(8);
     opts.setUseFsync(false);
     opts.setBytesPerSync(8388608);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to