http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
index 4616799..cec02de 100644
--- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
+++ b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.scm.container;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.hdsl.protocol.proto
@@ -34,7 +34,6 @@ import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeID;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -118,7 +117,7 @@ public class TestContainerMapping {
     5 separate nodes  from the list of 10 datanodes that got allocated a
     container.
      */
-    Set<String> pipelineList = new TreeSet<>();
+    Set<UUID> pipelineList = new TreeSet<>();
     for (int x = 0; x < 30; x++) {
       ContainerInfo containerInfo = mapping.allocateContainer(
           xceiverClientManager.getType(),
@@ -128,7 +127,7 @@ public class TestContainerMapping {
       Assert.assertNotNull(containerInfo);
       Assert.assertNotNull(containerInfo.getPipeline());
       pipelineList.add(containerInfo.getPipeline().getLeader()
-          .getDatanodeUuid());
+          .getUuid());
     }
     Assert.assertTrue(pipelineList.size() > 5);
   }
@@ -142,8 +141,8 @@ public class TestContainerMapping {
         containerOwner).getPipeline();
     Assert.assertNotNull(pipeline);
     Pipeline newPipeline = mapping.getContainer(containerName).getPipeline();
-    Assert.assertEquals(pipeline.getLeader().getDatanodeUuid(),
-        newPipeline.getLeader().getDatanodeUuid());
+    Assert.assertEquals(pipeline.getLeader().getUuid(),
+        newPipeline.getLeader().getUuid());
   }
 
   @Test
@@ -209,7 +208,7 @@ public class TestContainerMapping {
   public void testFullContainerReport() throws IOException {
     String containerName = UUID.randomUUID().toString();
     ContainerInfo info = createContainer(containerName);
-    DatanodeID datanodeID = getDatanodeID();
+    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
@@ -232,7 +231,7 @@ public class TestContainerMapping {
 
     ContainerReportsRequestProto.Builder crBuilder =
         ContainerReportsRequestProto.newBuilder();
-    crBuilder.setDatanodeID(datanodeID.getProtoBufMessage())
+    crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
         .setType(reportType).addAllReports(reports);
 
     mapping.processContainerReports(crBuilder.build());
@@ -246,7 +245,7 @@ public class TestContainerMapping {
   public void testContainerCloseWithContainerReport() throws IOException {
     String containerName = UUID.randomUUID().toString();
     ContainerInfo info = createContainer(containerName);
-    DatanodeID datanodeID = TestUtils.getDatanodeID();
+    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
     List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
@@ -270,7 +269,7 @@ public class TestContainerMapping {
 
     ContainerReportsRequestProto.Builder crBuilder =
         ContainerReportsRequestProto.newBuilder();
-    crBuilder.setDatanodeID(datanodeID.getProtoBufMessage())
+    crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
         .setType(reportType).addAllReports(reports);
 
     mapping.processContainerReports(crBuilder.build());
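
Note on the pattern above: this patch replaces org.apache.hadoop.hdfs.protocol.DatanodeID with org.apache.hadoop.hdsl.protocol.DatanodeDetails, and the leader identity moves from a String (getDatanodeUuid()) to java.util.UUID (getUuid()), hence Set<UUID> instead of Set<String>. A minimal sketch of the new call pattern, assuming only the signatures visible in these hunks:

    // Sketch, not authoritative: leader identity is now a java.util.UUID.
    Set<UUID> leaders = new TreeSet<>();
    Pipeline pipeline = containerInfo.getPipeline();
    leaders.add(pipeline.getLeader().getUuid());   // was getDatanodeUuid() -> String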

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
index dca026e..4a797b2 100644
--- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.ozone.scm.container.closer;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.scm.container.ContainerMapping;
 import org.apache.hadoop.ozone.scm.container.MockNodeManager;
 import org.apache.hadoop.ozone.scm.container.TestContainerMapping;
@@ -97,7 +97,7 @@ public class TestContainerCloser {
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
 
-    DatanodeID datanodeID = info.getPipeline().getLeader();
+    DatanodeDetails datanode = info.getPipeline().getLeader();
     // Send a container report with used set to 1 GB. This should not close.
     sendContainerReport(info, 1 * GIGABYTE);
 
@@ -108,7 +108,7 @@ public class TestContainerCloser {
     Assert.assertEquals(0, mapping.getCloser().getCloseCount());
 
     // Assert that the Close command was not queued for this Datanode.
-    Assert.assertEquals(0, nodeManager.getCommandCount(datanodeID));
+    Assert.assertEquals(0, nodeManager.getCommandCount(datanode));
 
     long newUsed = (long) (size * 0.91f);
     sendContainerReport(info, newUsed);
@@ -121,7 +121,7 @@ public class TestContainerCloser {
         mapping.getCloser().getCloseCount() - currentCount);
 
     // Assert that the Close command was Queued for this Datanode.
-    Assert.assertEquals(1, nodeManager.getCommandCount(datanodeID));
+    Assert.assertEquals(1, nodeManager.getCommandCount(datanode));
   }
 
   @Test
@@ -146,7 +146,7 @@ public class TestContainerCloser {
     long runCount = mapping.getCloser().getThreadRunCount();
 
 
-    DatanodeID datanodeID = info.getPipeline().getLeader();
+    DatanodeDetails datanodeDetails = info.getPipeline().getLeader();
 
      // Send this command twice and assert we have only one command in the queue.
     sendContainerReport(info, 5 * GIGABYTE);
@@ -154,7 +154,7 @@ public class TestContainerCloser {
 
     // Assert that the Close command was Queued for this Datanode.
     Assert.assertEquals(1,
-        nodeManager.getCommandCount(datanodeID));
+        nodeManager.getCommandCount(datanodeDetails));
     // And close count will be one.
     Assert.assertEquals(1,
         mapping.getCloser().getCloseCount() - currentCount);
@@ -163,7 +163,7 @@ public class TestContainerCloser {
     //send another close and the system will queue this to the command queue.
     sendContainerReport(info, 5 * GIGABYTE);
     Assert.assertEquals(2,
-        nodeManager.getCommandCount(datanodeID));
+        nodeManager.getCommandCount(datanodeDetails));
     // but the close count will still be one, since from the point of view of
     // closer we are closing only one container even if we have send multiple
     // close commands to the datanode.
@@ -213,8 +213,8 @@ public class TestContainerCloser {
         .setReadBytes(2000000000L)
         .setWriteBytes(2000000000L)
         .setContainerID(1L);
-    reports.setDatanodeID(
-        DFSTestUtil.getLocalDatanodeID().getProtoBufMessage());
+    reports.setDatanodeDetails(
+        TestUtils.getDatanodeDetails().getProtoBufMessage());
     reports.addReports(ciBuilder);
     mapping.processContainerReports(reports.build());
   }
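
Note: container reports are now tagged with a DatanodeDetails proto instead of a DatanodeID proto, and the SCM test helper TestUtils.getDatanodeDetails() replaces DFSTestUtil.getLocalDatanodeID(). A sketch of the updated report construction, using only calls shown in this diff:

    // Sketch under the signatures above; TestUtils is the test helper from this patch.
    DatanodeDetails dn = TestUtils.getDatanodeDetails();
    ContainerReportsRequestProto.Builder crBuilder =
        ContainerReportsRequestProto.newBuilder();
    crBuilder.setDatanodeDetails(dn.getProtoBufMessage())   // was setDatanodeID(...)
        .setType(ContainerReportsRequestProto.reportType.fullReport);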

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
index 1bf623a..b0f47f5 100644
--- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
@@ -21,11 +21,10 @@ package org.apache.hadoop.ozone.scm.node;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.hdsl.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ReportState;
 import org.apache.hadoop.hdsl.protocol.proto
@@ -132,16 +131,16 @@ public class TestContainerPlacement {
     SCMNodeManager nodeManager = createNodeManager(conf);
     ContainerMapping containerManager =
         createContainerManager(conf, nodeManager);
-    List<DatanodeID> datanodes =
-        TestUtils.getRegisteredDatanodeIDs(nodeManager, nodeCount);
+    List<DatanodeDetails> datanodes =
+        TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
     try {
-      for (DatanodeID datanodeID : datanodes) {
+      for (DatanodeDetails datanodeDetails : datanodes) {
         SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
         SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
         srb.setStorageUuid(UUID.randomUUID().toString());
         srb.setCapacity(capacity).setScmUsed(used).
             setRemaining(remaining).build();
-        nodeManager.sendHeartbeat(datanodeID,
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
             nrb.addStorageReport(srb).build(), reportState);
       }
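
Note: SCMNodeManager.sendHeartbeat now takes the protobuf form of DatanodeDetails rather than a DatanodeID object. A consolidated sketch of the heartbeat loop, assuming the signatures shown above:

    // Sketch: heartbeats are keyed by the DatanodeDetails proto, not a DatanodeID.
    for (DatanodeDetails dn : datanodes) {
      SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
      srb.setStorageUuid(UUID.randomUUID().toString());
      nodeManager.sendHeartbeat(dn.getProtoBufMessage(),
          nrb.addStorageReport(srb).build(), reportState);
    }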
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
index 2ae6ea6..6c821d3 100644
--- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
+++ b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.ozone.scm.node;
 import com.google.common.base.Supplier;
 import static java.util.concurrent.TimeUnit.*;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.hdsl.protocol.proto
@@ -38,7 +37,6 @@ import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 
-import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeID;
 import org.hamcrest.CoreMatchers;
 import org.junit.Assert;
 import org.junit.After;
@@ -69,7 +67,8 @@ import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS;
+import static org.apache.hadoop.scm.ScmConfigKeys
+    .OZONE_SCM_MAX_HB_COUNT_TO_PROCESS;
 import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.MatcherAssert.assertThat;
@@ -153,8 +152,10 @@ public class TestNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       // Send some heartbeats from different nodes.
       for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
-        DatanodeID datanodeID = getDatanodeID(nodeManager);
-        nodeManager.sendHeartbeat(datanodeID, null, reportState);
+        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
+            nodeManager);
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+            null, reportState);
       }
 
       // Wait for 4 seconds max.
@@ -200,7 +201,8 @@ public class TestNodeManager {
 
       // Need 100 nodes to come out of chill mode, only one node is sending HB.
       nodeManager.setMinimumChillModeNodes(100);
-      nodeManager.sendHeartbeat(TestUtils.getDatanodeID(nodeManager),
+      nodeManager.sendHeartbeat(TestUtils.getDatanodeDetails(nodeManager)
+          .getProtoBufMessage(),
           null, reportState);
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
           100, 4 * 1000);
@@ -223,11 +225,13 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
       nodeManager.setMinimumChillModeNodes(3);
-      DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
+      DatanodeDetails datanodeDetails = TestUtils
+          .getDatanodeDetails(nodeManager);
 
      // Send 10 heartbeat from same node, and assert we never leave chill mode.
       for (int x = 0; x < 10; x++) {
-        nodeManager.sendHeartbeat(datanodeID, null, reportState);
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+            null, reportState);
       }
 
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
@@ -253,11 +257,12 @@ public class TestNodeManager {
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
     SCMNodeManager nodeManager = createNodeManager(conf);
-    DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
+    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(nodeManager);
     nodeManager.close();
 
     // These should never be processed.
-    nodeManager.sendHeartbeat(datanodeID, null, reportState);
+    nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+        null, reportState);
 
     // Let us just wait for 2 seconds to prove that HBs are not processed.
     Thread.sleep(2 * 1000);
@@ -277,12 +282,13 @@ public class TestNodeManager {
     OzoneConfiguration conf = getConf();
     conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         100, TimeUnit.MILLISECONDS);
-    DatanodeID datanodeID = TestUtils.getDatanodeID();
+    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
-      nodemanager.register(datanodeID);
-      List<SCMCommand> command = nodemanager.sendHeartbeat(datanodeID,
+      nodemanager.register(datanodeDetails.getProtoBufMessage());
+      List<SCMCommand> command = nodemanager.sendHeartbeat(
+          datanodeDetails.getProtoBufMessage(),
           null, reportState);
-      Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeID));
+      Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
       Assert.assertTrue("On regular HB calls, SCM responses a "
           + "datanode with an empty command list", command.isEmpty());
     }
@@ -291,7 +297,7 @@ public class TestNodeManager {
     // This happens when SCM restarts.
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       Assert.assertFalse(nodemanager
-          .getAllNodes().contains(datanodeID));
+          .getAllNodes().contains(datanodeDetails));
       try {
         // SCM handles heartbeat asynchronously.
         // It may need more than one heartbeat processing to
@@ -299,8 +305,8 @@ public class TestNodeManager {
         GenericTestUtils.waitFor(new Supplier<Boolean>() {
           @Override public Boolean get() {
             List<SCMCommand> command =
-                nodemanager.sendHeartbeat(datanodeID, null,
-                    reportState);
+                nodemanager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+                    null, reportState);
             return command.size() == 1 && command.get(0).getType()
                 .equals(SCMCmdType.reregisterCommand);
           }
@@ -329,8 +335,10 @@ public class TestNodeManager {
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
 
       for (int x = 0; x < count; x++) {
-        DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
-        nodeManager.sendHeartbeat(datanodeID, null, reportState);
+        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
+            nodeManager);
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
+            null, reportState);
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
           100, 4 * 1000);
@@ -415,41 +423,42 @@ public class TestNodeManager {
 
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeID> nodeList = createNodeSet(nodeManager, nodeCount,
+      List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount,
           "Node");
 
-      DatanodeID staleNode = TestUtils.getDatanodeID(nodeManager);
+      DatanodeDetails staleNode = TestUtils.getDatanodeDetails(nodeManager);
 
       // Heartbeat once
-      nodeManager.sendHeartbeat(staleNode, null, reportState);
+      nodeManager.sendHeartbeat(staleNode.getProtoBufMessage(),
+          null, reportState);
 
       // Heartbeat all other nodes.
-      for (DatanodeID dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null, reportState);
+      for (DatanodeDetails dn : nodeList) {
+        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
       }
 
       // Wait for 2 seconds .. and heartbeat good nodes again.
       Thread.sleep(2 * 1000);
 
-      for (DatanodeID dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null, reportState);
+      for (DatanodeDetails dn : nodeList) {
+        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
       }
 
       // Wait for 2 seconds, wait a total of 4 seconds to make sure that the
       // node moves into stale state.
       Thread.sleep(2 * 1000);
-      List<DatanodeID> staleNodeList = nodeManager.getNodes(STALE);
+      List<DatanodeDetails> staleNodeList = nodeManager.getNodes(STALE);
       assertEquals("Expected to find 1 stale node",
           1, nodeManager.getNodeCount(STALE));
       assertEquals("Expected to find 1 stale node",
           1, staleNodeList.size());
       assertEquals("Stale node is not the expected ID", staleNode
-          .getDatanodeUuid(), staleNodeList.get(0).getDatanodeUuid());
+          .getUuid(), staleNodeList.get(0).getUuid());
       Thread.sleep(1000);
 
       // heartbeat good nodes again.
-      for (DatanodeID dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null, reportState);
+      for (DatanodeDetails dn : nodeList) {
+        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
       }
 
       //  6 seconds is the dead window for this test , so we wait a total of
@@ -464,13 +473,13 @@ public class TestNodeManager {
           0, staleNodeList.size());
 
       // Check for the dead node now.
-      List<DatanodeID> deadNodeList = nodeManager.getNodes(DEAD);
+      List<DatanodeDetails> deadNodeList = nodeManager.getNodes(DEAD);
       assertEquals("Expected to find 1 dead node", 1,
           nodeManager.getNodeCount(DEAD));
       assertEquals("Expected to find 1 dead node",
           1, deadNodeList.size());
       assertEquals("Dead node is not the expected ID", staleNode
-          .getDatanodeUuid(), deadNodeList.get(0).getDatanodeUuid());
+          .getUuid(), deadNodeList.get(0).getUuid());
     }
   }
 
@@ -558,15 +567,18 @@ public class TestNodeManager {
      * Cluster state: Healthy: All nodes are heartbeat-ing like normal.
      */
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeID healthyNode =
-          TestUtils.getDatanodeID(nodeManager, "HealthyNode");
-      DatanodeID staleNode =
-          TestUtils.getDatanodeID(nodeManager, "StaleNode");
-      DatanodeID deadNode =
-          TestUtils.getDatanodeID(nodeManager, "DeadNode");
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
-      nodeManager.sendHeartbeat(staleNode, null, reportState);
-      nodeManager.sendHeartbeat(deadNode, null, reportState);
+      DatanodeDetails healthyNode =
+          TestUtils.getDatanodeDetails(nodeManager, "HealthyNode");
+      DatanodeDetails staleNode =
+          TestUtils.getDatanodeDetails(nodeManager, "StaleNode");
+      DatanodeDetails deadNode =
+          TestUtils.getDatanodeDetails(nodeManager, "DeadNode");
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          staleNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          deadNode.getProtoBufMessage(), null, reportState);
 
       // Sleep so that heartbeat processing thread gets to run.
       Thread.sleep(500);
@@ -592,12 +604,16 @@ public class TestNodeManager {
        * the 3 second windows.
        */
 
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
-      nodeManager.sendHeartbeat(staleNode, null, reportState);
-      nodeManager.sendHeartbeat(deadNode, null, reportState);
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          staleNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          deadNode.getProtoBufMessage(), null, reportState);
 
       Thread.sleep(1500);
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
       Thread.sleep(2 * 1000);
       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
 
@@ -605,10 +621,10 @@ public class TestNodeManager {
       // 3.5 seconds from last heartbeat for the stale and deadNode. So those
       //  2 nodes must move to Stale state and the healthy node must
       // remain in the healthy State.
-      List<DatanodeID> healthyList = nodeManager.getNodes(HEALTHY);
+      List<DatanodeDetails> healthyList = nodeManager.getNodes(HEALTHY);
       assertEquals("Expected one healthy node", 1, healthyList.size());
       assertEquals("Healthy node is not the expected ID", healthyNode
-          .getDatanodeUuid(), healthyList.get(0).getDatanodeUuid());
+          .getUuid(), healthyList.get(0).getUuid());
 
       assertEquals(2, nodeManager.getNodeCount(STALE));
 
@@ -617,18 +633,21 @@ public class TestNodeManager {
        * staleNode to move to stale state and deadNode to move to dead state.
        */
 
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
-      nodeManager.sendHeartbeat(staleNode, null, reportState);
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          staleNode.getProtoBufMessage(), null, reportState);
       Thread.sleep(1500);
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
       Thread.sleep(2 * 1000);
 
       // 3.5 seconds have elapsed for stale node, so it moves into Stale.
       // 7 seconds have elapsed for dead node, so it moves into dead.
       // 2 Seconds have elapsed for healthy node, so it stays in healhty state.
       healthyList = nodeManager.getNodes(HEALTHY);
-      List<DatanodeID> staleList = nodeManager.getNodes(STALE);
-      List<DatanodeID> deadList = nodeManager.getNodes(DEAD);
+      List<DatanodeDetails> staleList = nodeManager.getNodes(STALE);
+      List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD);
 
       assertEquals(3, nodeManager.getAllNodes().size());
       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
@@ -638,24 +657,27 @@ public class TestNodeManager {
       assertEquals("Expected one healthy node",
           1, healthyList.size());
       assertEquals("Healthy node is not the expected ID", healthyNode
-          .getDatanodeUuid(), healthyList.get(0).getDatanodeUuid());
+          .getUuid(), healthyList.get(0).getUuid());
 
       assertEquals("Expected one stale node",
           1, staleList.size());
       assertEquals("Stale node is not the expected ID", staleNode
-          .getDatanodeUuid(), staleList.get(0).getDatanodeUuid());
+          .getUuid(), staleList.get(0).getUuid());
 
       assertEquals("Expected one dead node",
           1, deadList.size());
       assertEquals("Dead node is not the expected ID", deadNode
-          .getDatanodeUuid(), deadList.get(0).getDatanodeUuid());
+          .getUuid(), deadList.get(0).getUuid());
       /**
        * Cluster State : let us heartbeat all the nodes and verify that we get
        * back all the nodes in healthy state.
        */
-      nodeManager.sendHeartbeat(healthyNode, null, reportState);
-      nodeManager.sendHeartbeat(staleNode, null, reportState);
-      nodeManager.sendHeartbeat(deadNode, null, reportState);
+      nodeManager.sendHeartbeat(
+          healthyNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          staleNode.getProtoBufMessage(), null, reportState);
+      nodeManager.sendHeartbeat(
+          deadNode.getProtoBufMessage(), null, reportState);
       Thread.sleep(500);
       //Assert all nodes are healthy.
       assertEquals(3, nodeManager.getAllNodes().size());
@@ -671,11 +693,12 @@ public class TestNodeManager {
    * @param sleepDuration - Duration to sleep between heartbeats.
    * @throws InterruptedException
    */
-  private void heartbeatNodeSet(SCMNodeManager manager, List<DatanodeID> list,
-      int sleepDuration) throws InterruptedException {
+  private void heartbeatNodeSet(SCMNodeManager manager,
+                                List<DatanodeDetails> list,
+                                int sleepDuration) throws InterruptedException {
     while (!Thread.currentThread().isInterrupted()) {
-      for (DatanodeID dn : list) {
-        manager.sendHeartbeat(dn, null, reportState);
+      for (DatanodeDetails dn : list) {
+        manager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
       }
       Thread.sleep(sleepDuration);
     }
@@ -688,12 +711,12 @@ public class TestNodeManager {
    * @param prefix - A prefix string that can be used in verification.
    * @return List of Nodes.
    */
-  private List<DatanodeID> createNodeSet(SCMNodeManager nodeManager, int
+  private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int
       count, String
       prefix) {
-    List<DatanodeID> list = new LinkedList<>();
+    List<DatanodeDetails> list = new LinkedList<>();
     for (int x = 0; x < count; x++) {
-      list.add(TestUtils.getDatanodeID(nodeManager, prefix + x));
+      list.add(TestUtils.getDatanodeDetails(nodeManager, prefix + x));
     }
     return list;
   }
@@ -734,11 +757,11 @@ public class TestNodeManager {
 
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeID> healthyNodeList = createNodeSet(nodeManager,
+      List<DatanodeDetails> healthyNodeList = createNodeSet(nodeManager,
           healthyCount, "Healthy");
-      List<DatanodeID> staleNodeList = createNodeSet(nodeManager, staleCount,
-          "Stale");
-      List<DatanodeID> deadNodeList = createNodeSet(nodeManager, deadCount,
+      List<DatanodeDetails> staleNodeList = createNodeSet(nodeManager,
+          staleCount, "Stale");
+      List<DatanodeDetails> deadNodeList = createNodeSet(nodeManager, deadCount,
           "Dead");
 
       Runnable healthyNodeTask = () -> {
@@ -761,8 +784,8 @@ public class TestNodeManager {
 
       // No Thread just one time HBs the node manager, so that these will be
       // marked as dead nodes eventually.
-      for (DatanodeID dn : deadNodeList) {
-        nodeManager.sendHeartbeat(dn, null, reportState);
+      for (DatanodeDetails dn : deadNodeList) {
+        nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
       }
 
 
@@ -784,9 +807,9 @@ public class TestNodeManager {
 
       assertEquals(deadCount, nodeManager.getNodeCount(DEAD));
 
-      List<DatanodeID> deadList = nodeManager.getNodes(DEAD);
+      List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD);
 
-      for (DatanodeID node : deadList) {
+      for (DatanodeDetails node : deadList) {
         assertThat(node.getHostName(), CoreMatchers.startsWith("Dead"));
       }
 
@@ -825,9 +848,10 @@ public class TestNodeManager {
         MILLISECONDS);
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeID> healthyList = createNodeSet(nodeManager,
+      List<DatanodeDetails> healthyList = createNodeSet(nodeManager,
           healthyCount, "h");
-      List<DatanodeID> staleList = createNodeSet(nodeManager, staleCount, "s");
+      List<DatanodeDetails> staleList = createNodeSet(nodeManager,
+          staleCount, "s");
 
       Runnable healthyNodeTask = () -> {
         try {
@@ -886,8 +910,8 @@ public class TestNodeManager {
     conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 500);
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeID> healthyList = createNodeSet(nodeManager, healthyCount,
-          "h");
+      List<DatanodeDetails> healthyList = createNodeSet(nodeManager,
+          healthyCount, "h");
       GenericTestUtils.LogCapturer logCapturer =
           GenericTestUtils.LogCapturer.captureLogs(SCMNodeManager.LOG);
       Runnable healthyNodeTask = () -> {
@@ -921,8 +945,10 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       nodeManager.setMinimumChillModeNodes(10);
-      DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
-      nodeManager.sendHeartbeat(datanodeID, null, reportState);
+      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
+          nodeManager);
+      nodeManager.sendHeartbeat(
+          datanodeDetails.getProtoBufMessage(), null, reportState);
       String status = nodeManager.getChillModeStatus();
       Assert.assertThat(status, containsString("Still in chill " +
           "mode, waiting on nodes to report in."));
@@ -948,8 +974,9 @@ public class TestNodeManager {
 
      // Assert that node manager force enter cannot be overridden by nodes HBs.
       for (int x = 0; x < 20; x++) {
-        DatanodeID datanode = TestUtils.getDatanodeID(nodeManager);
-        nodeManager.sendHeartbeat(datanode, null, reportState);
+        DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
+        nodeManager.sendHeartbeat(datanode.getProtoBufMessage(),
+            null, reportState);
       }
 
       Thread.sleep(500);
@@ -985,14 +1012,15 @@ public class TestNodeManager {
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
       for (int x = 0; x < nodeCount; x++) {
-        DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
+        DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
+            nodeManager);
 
         SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
         SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
         srb.setStorageUuid(UUID.randomUUID().toString());
         srb.setCapacity(capacity).setScmUsed(used).
             setRemaining(capacity - used).build();
-        nodeManager.sendHeartbeat(datanodeID,
+        nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
             nrb.addStorageReport(srb).build(), reportState);
       }
       GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
@@ -1029,7 +1057,8 @@ public class TestNodeManager {
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      DatanodeID datanodeID = TestUtils.getDatanodeID(nodeManager);
+      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
+          nodeManager);
       final long capacity = 2000;
       final long usedPerHeartbeat = 100;
 
@@ -1041,7 +1070,8 @@ public class TestNodeManager {
             .setRemaining(capacity - x * usedPerHeartbeat).build();
         nrb.addStorageReport(srb);
 
-        nodeManager.sendHeartbeat(datanodeID, nrb.build(), reportState);
+        nodeManager.sendHeartbeat(
+            datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
         Thread.sleep(100);
       }
 
@@ -1063,23 +1093,23 @@ public class TestNodeManager {
 
       // Test NodeManager#getNodeStats
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      long nodeCapacity = nodeManager.getNodeStat(datanodeID).get()
+      long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get()
           .getCapacity().get();
       assertEquals(capacity, nodeCapacity);
 
-      foundScmUsed = nodeManager.getNodeStat(datanodeID).get().getScmUsed()
+      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed()
           .get();
       assertEquals(expectedScmUsed, foundScmUsed);
 
-      foundRemaining = nodeManager.getNodeStat(datanodeID).get()
+      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get()
           .getRemaining().get();
       assertEquals(expectedRemaining, foundRemaining);
 
       // Compare the result from
       // NodeManager#getNodeStats and NodeManager#getNodeStat
       SCMNodeStat stat1 = nodeManager.getNodeStats().
-          get(datanodeID.getDatanodeUuid());
-      SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeID).get();
+          get(datanodeDetails);
+      SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get();
       assertEquals(stat1, stat2);
 
       // Wait up to 4s so that the node becomes stale
@@ -1089,14 +1119,14 @@ public class TestNodeManager {
           4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
 
-      foundCapacity = nodeManager.getNodeStat(datanodeID).get()
+      foundCapacity = nodeManager.getNodeStat(datanodeDetails).get()
           .getCapacity().get();
       assertEquals(capacity, foundCapacity);
-      foundScmUsed = nodeManager.getNodeStat(datanodeID).get()
+      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get()
           .getScmUsed().get();
       assertEquals(expectedScmUsed, foundScmUsed);
 
-      foundRemaining = nodeManager.getNodeStat(datanodeID).get().
+      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get().
           getRemaining().get();
       assertEquals(expectedRemaining, foundRemaining);
 
@@ -1123,7 +1153,8 @@ public class TestNodeManager {
       srb.setCapacity(capacity).setScmUsed(expectedScmUsed)
           .setRemaining(expectedRemaining).build();
       nrb.addStorageReport(srb);
-      nodeManager.sendHeartbeat(datanodeID, nrb.build(), reportState);
+      nodeManager.sendHeartbeat(
+          datanodeDetails.getProtoBufMessage(), nrb.build(), reportState);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
       // Verify usage info should be updated.
@@ -1134,13 +1165,13 @@ public class TestNodeManager {
           () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed,
           100, 4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      foundCapacity = nodeManager.getNodeStat(datanodeID).get()
+      foundCapacity = nodeManager.getNodeStat(datanodeDetails).get()
           .getCapacity().get();
       assertEquals(capacity, foundCapacity);
-      foundScmUsed = nodeManager.getNodeStat(datanodeID).get().getScmUsed()
+      foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed()
           .get();
       assertEquals(expectedScmUsed, foundScmUsed);
-      foundRemaining = nodeManager.getNodeStat(datanodeID).get()
+      foundRemaining = nodeManager.getNodeStat(datanodeDetails).get()
           .getRemaining().get();
       assertEquals(expectedRemaining, foundRemaining);
     }
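
Note: besides the heartbeat signature change, per-node stats are now looked up by the DatanodeDetails object itself rather than by its UUID string (see the getNodeStats().get(...) hunk above). A sketch, assuming the stats map is keyed by DatanodeDetails as this diff implies:

    // Sketch: both lookups should agree for the same datanode.
    SCMNodeStat viaMap = nodeManager.getNodeStats().get(datanodeDetails);  // was get(uuidString)
    SCMNodeStat viaGet = nodeManager.getNodeStat(datanodeDetails).get();
    assertEquals(viaMap, viaGet);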

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java
index 86debbf..f12e831 100644
--- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java
+++ b/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java
@@ -20,16 +20,15 @@ package org.apache.hadoop.ozone.scm.node;
 
 import org.apache.commons.collections.ListUtils;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy;
 import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.test.PathUtils;
 
-import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeIDs;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -79,21 +78,22 @@ public class TestSCMNodePoolManager {
       NodePoolManager npMgr = createNodePoolManager(conf);
 
       final int nodeCount = 4;
-      final List<DatanodeID> nodes = getDatanodeIDs(nodeCount);
+      final List<DatanodeDetails> nodes = TestUtils
+          .getListOfDatanodeDetails(nodeCount);
       assertEquals(0, npMgr.getNodePools().size());
-      for (DatanodeID node: nodes) {
+      for (DatanodeDetails node: nodes) {
         npMgr.addNode(defaultPool, node);
       }
-      List<DatanodeID> nodesRetrieved = npMgr.getNodes(defaultPool);
+      List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
       assertEquals(nodeCount, nodesRetrieved.size());
       assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
 
-      DatanodeID nodeRemoved = nodes.remove(2);
+      DatanodeDetails nodeRemoved = nodes.remove(2);
       npMgr.removeNode(defaultPool, nodeRemoved);
-      List<DatanodeID> nodesAfterRemove = npMgr.getNodes(defaultPool);
+      List<DatanodeDetails> nodesAfterRemove = npMgr.getNodes(defaultPool);
       assertTwoDatanodeListsEqual(nodes, nodesAfterRemove);
 
-      List<DatanodeID> nonExistSet = npMgr.getNodes("NonExistSet");
+      List<DatanodeDetails> nonExistSet = npMgr.getNodes("NonExistSet");
       assertEquals(0, nonExistSet.size());
     } finally {
       FileUtil.fullyDelete(testDir);
@@ -111,16 +111,17 @@ public class TestSCMNodePoolManager {
     OzoneConfiguration conf = new OzoneConfiguration();
     final String defaultPool = "DefaultPool";
     final int nodeCount = 4;
-    final List<DatanodeID> nodes = getDatanodeIDs(nodeCount);
+    final List<DatanodeDetails> nodes = TestUtils
+        .getListOfDatanodeDetails(nodeCount);
 
     try {
       try {
         SCMNodePoolManager npMgr = createNodePoolManager(conf);
         assertEquals(0, npMgr.getNodePools().size());
-        for (DatanodeID node : nodes) {
+        for (DatanodeDetails node : nodes) {
           npMgr.addNode(defaultPool, node);
         }
-        List<DatanodeID> nodesRetrieved = npMgr.getNodes(defaultPool);
+        List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
         assertEquals(nodeCount, nodesRetrieved.size());
         assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
         npMgr.close();
@@ -132,7 +133,7 @@ public class TestSCMNodePoolManager {
       // try reload with a new NodePoolManager instance
       try {
         SCMNodePoolManager npMgr = createNodePoolManager(conf);
-        List<DatanodeID> nodesRetrieved = npMgr.getNodes(defaultPool);
+        List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
         assertEquals(nodeCount, nodesRetrieved.size());
         assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
       } finally {
@@ -148,8 +149,8 @@ public class TestSCMNodePoolManager {
    * @param list1 - datanode list 1.
    * @param list2 - datanode list 2.
    */
-  private void assertTwoDatanodeListsEqual(List<DatanodeID> list1,
-      List<DatanodeID> list2) {
+  private void assertTwoDatanodeListsEqual(List<DatanodeDetails> list1,
+      List<DatanodeDetails> list2) {
     assertEquals(list1.size(), list2.size());
     Collections.sort(list1);
     Collections.sort(list2);
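
Note: the NodePoolManager API keeps its shape; only the element type changes to DatanodeDetails. Collections.sort(...) in assertTwoDatanodeListsEqual implies DatanodeDetails is Comparable. A sketch of the pool round trip, using only calls shown above ("DefaultPool" is the test pool name):

    // Sketch under the signatures in this diff.
    List<DatanodeDetails> nodes = TestUtils.getListOfDatanodeDetails(4);
    for (DatanodeDetails node : nodes) {
      npMgr.addNode("DefaultPool", node);
    }
    List<DatanodeDetails> retrieved = npMgr.getNodes("DefaultPool");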

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java b/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java
index 89671fc..e62fe6b 100644
--- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java
@@ -23,8 +23,8 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
 import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.scm.client.ScmClient;
@@ -94,7 +94,7 @@ public class InfoContainerHandler extends OzoneCommandHandler {
     // Print pipeline of an existing container.
     logOut("LeaderID: %s", pipeline.getLeader().getHostName());
     String machinesStr = pipeline.getMachines().stream().map(
-        DatanodeID::getHostName).collect(Collectors.joining(","));
+        DatanodeDetails::getHostName).collect(Collectors.joining(","));
     logOut("Datanodes: [%s]", machinesStr);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
index 159cdf3..bff947e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
@@ -119,8 +119,8 @@ public final class OzoneUtils {
    * @param conf - Configuration
    * @return the path of datanode id as string
    */
-  public static String getDatanodeIDPath(Configuration conf) {
-    return HdslUtils.getDatanodeIDPath(conf);
+  public static String getDatanodeIdFilePath(Configuration conf) {
+    return HdslUtils.getDatanodeIdFilePath(conf);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
index 83c8a90..403241f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
@@ -23,8 +23,10 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -46,6 +48,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
+
+import org.apache.hadoop.util.ServicePlugin;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -116,6 +120,10 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
       int i, Configuration dnConf, boolean setupHostsFile,
       boolean checkDnAddrConf) throws IOException {
     super.setupDatanodeAddress(i, dnConf, setupHostsFile, checkDnAddrConf);
+    String path = GenericTestUtils.getTempPath(
+        MiniOzoneClassicCluster.class.getSimpleName() + "datanode");
+    dnConf.setStrings(ScmConfigKeys.OZONE_SCM_DATANODE_ID,
+        path + "/" + i + "-datanode.id");
     setConf(i, dnConf, OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         getInstanceStorageDir(i, -1).getCanonicalPath());
     String containerMetaDirs = dnConf.get(
@@ -233,7 +241,7 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
     // An Ozone request may originate at any DataNode, so pick one at random.
     int dnIndex = new Random().nextInt(getDataNodes().size());
     String uri = String.format("http://127.0.0.1:%d",
-        getDataNodes().get(dnIndex).getDatanodeId().getOzoneRestPort());
+        getOzoneRestPort(getDataNodes().get(dnIndex)));
     LOG.info("Creating Ozone client to DataNode {} with URI {} and user {}",
         dnIndex, uri, USER_AUTH);
     try {
@@ -330,6 +338,20 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
         4 * 1000);
   }
 
+  public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
+    DatanodeDetails datanodeDetails = null;
+    for (ServicePlugin plugin : dataNode.getPlugins()) {
+      if (plugin instanceof HdslDatanodeService) {
+        datanodeDetails = ((HdslDatanodeService) plugin).getDatanodeDetails();
+      }
+    }
+    return datanodeDetails;
+  }
+
+  public static int getOzoneRestPort(DataNode dataNode) {
+    return getDatanodeDetails(dataNode).getOzoneRestPort();
+  }
+
   /**
    * Builder for configuring the MiniOzoneCluster to run.
    */
@@ -479,8 +501,8 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
       conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.HDSL_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(DFS_DATANODE_PLUGINS_KEY,
-          "org.apache.hadoop.ozone.HdslServerPlugin,"
-              + "org.apache.hadoop.ozone.web.ObjectStoreRestPlugin");
+          "org.apache.hadoop.ozone.web.ObjectStoreRestPlugin," +
+          "org.apache.hadoop.ozone.HdslDatanodeService");
 
       // Configure KSM and SCM handlers
       conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
@@ -538,12 +560,6 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
       Files.createDirectories(containerPath);
       conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath
           .toString());
-
-      // TODO : Fix this, we need a more generic mechanism to map
-      // different datanode ID for different datanodes when we have lots of
-      // datanodes in the cluster.
-      conf.setStrings(ScmConfigKeys.OZONE_SCM_DATANODE_ID,
-          scmPath.toString() + "/datanode.id");
     }
 
     private void initializeScm() throws IOException {
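
Note: MiniOzoneClassicCluster now writes a distinct datanode ID file per datanode (OZONE_SCM_DATANODE_ID is set per index in setupDatanodeAddress, replacing the shared scmPath/datanode.id) and exposes static helpers that recover the HDSL identity from a DataNode's service plugins. A sketch of caller-side usage, assuming only the helpers added above:

    // Sketch: resolve the HDSL identity of a running datanode via its plugin.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeDetails details = MiniOzoneClassicCluster.getDatanodeDetails(dn);
    int restPort = MiniOzoneClassicCluster.getOzoneRestPort(dn);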

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
index 10e9c69..d6fb579 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
@@ -46,10 +46,10 @@ public class MiniOzoneTestHelper {
     return findHdslPlugin(dataNode).getDatanodeStateMachine();
   }
 
-  private static HdslServerPlugin findHdslPlugin(DataNode dataNode) {
+  private static HdslDatanodeService findHdslPlugin(DataNode dataNode) {
     for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HdslServerPlugin) {
-        return (HdslServerPlugin) plugin;
+      if (plugin instanceof HdslDatanodeService) {
+        return (HdslDatanodeService) plugin;
       }
     }
     throw new IllegalStateException("Can't find the Hdsl server plugin in the"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 574f0a4..eec097b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -80,7 +80,8 @@ public interface RatisTestHelper {
     }
 
     public int getDatanodeOzoneRestPort() {
-      return cluster.getDataNodes().get(0).getDatanodeId().getOzoneRestPort();
+      return MiniOzoneClassicCluster.getOzoneRestPort(
+          cluster.getDataNodes().get(0));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 45c5a3b..ff697b5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -20,22 +20,23 @@ package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.TestGenericTestUtils;
+import org.apache.hadoop.util.ServicePlugin;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -89,30 +90,23 @@ public class TestMiniOzoneCluster {
         .numDataNodes(numberOfNodes)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
         .build();
-
-    // make sure datanode.id file is correct
-    File idPath = new File(
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
-    assertTrue(idPath.exists());
-    List<DatanodeID> ids = ContainerUtils.readDatanodeIDsFrom(idPath);
-    assertEquals(numberOfNodes, ids.size());
-
     List<DataNode> datanodes = cluster.getDataNodes();
-    assertEquals(datanodes.size(), numberOfNodes);
+    assertEquals(numberOfNodes, datanodes.size());
     for(DataNode dn : datanodes) {
-      // Each datanode ID should match an entry in the ID file
-      assertTrue("Datanode ID not found in ID file",
-          ids.contains(dn.getDatanodeId()));
-
       // Create a single member pipe line
       String containerName = OzoneUtils.getRequestID();
-      DatanodeID dnId = dn.getDatanodeId();
+      DatanodeDetails datanodeDetails = null;
+      for (ServicePlugin plugin : dn.getPlugins()) {
+        if (plugin instanceof HdslDatanodeService) {
+          datanodeDetails = ((HdslDatanodeService) 
plugin).getDatanodeDetails();
+        }
+      }
       final PipelineChannel pipelineChannel =
-          new PipelineChannel(dnId.getDatanodeUuid(),
+          new PipelineChannel(datanodeDetails.getUuidString(),
               HdslProtos.LifeCycleState.OPEN,
               HdslProtos.ReplicationType.STAND_ALONE,
               HdslProtos.ReplicationFactor.ONE, "test");
-      pipelineChannel.addMember(dnId);
+      pipelineChannel.addMember(datanodeDetails);
       Pipeline pipeline = new Pipeline(containerName, pipelineChannel);
 
       // Verify client is able to connect to the container
@@ -126,9 +120,9 @@ public class TestMiniOzoneCluster {
   @Test
   public void testDatanodeIDPersistent() throws Exception {
     // Generate IDs for testing
-    DatanodeID id1 = DFSTestUtil.getLocalDatanodeID(1);
-    DatanodeID id2 = DFSTestUtil.getLocalDatanodeID(2);
-    DatanodeID id3 = DFSTestUtil.getLocalDatanodeID(3);
+    DatanodeDetails id1 = TestUtils.getDatanodeDetails();
+    DatanodeDetails id2 = TestUtils.getDatanodeDetails();
+    DatanodeDetails id3 = TestUtils.getDatanodeDetails();
     id1.setContainerPort(1);
     id2.setContainerPort(2);
     id3.setContainerPort(3);
@@ -136,51 +130,37 @@ public class TestMiniOzoneCluster {
     // Write a single ID to the file and read it out
     File validIdsFile = new File(WRITE_TMP, "valid-values.id");
     validIdsFile.delete();
-    ContainerUtils.writeDatanodeIDTo(id1, validIdsFile);
-    List<DatanodeID> validIds = ContainerUtils
-        .readDatanodeIDsFrom(validIdsFile);
-    assertEquals(1, validIds.size());
-    DatanodeID id11 = validIds.iterator().next();
-    assertEquals(id11, id1);
-    assertEquals(id11.getProtoBufMessage(), id1.getProtoBufMessage());
-
-    // Write should avoid duplicate entries
-    File noDupIDFile = new File(WRITE_TMP, "no-dup-values.id");
-    noDupIDFile.delete();
-    ContainerUtils.writeDatanodeIDTo(id1, noDupIDFile);
-    ContainerUtils.writeDatanodeIDTo(id1, noDupIDFile);
-    ContainerUtils.writeDatanodeIDTo(id1, noDupIDFile);
-    ContainerUtils.writeDatanodeIDTo(id2, noDupIDFile);
-    ContainerUtils.writeDatanodeIDTo(id3, noDupIDFile);
-
-    List<DatanodeID> noDupIDs =ContainerUtils
-        .readDatanodeIDsFrom(noDupIDFile);
-    assertEquals(3, noDupIDs.size());
-    assertTrue(noDupIDs.contains(id1));
-    assertTrue(noDupIDs.contains(id2));
-    assertTrue(noDupIDs.contains(id3));
+    ContainerUtils.writeDatanodeDetailsTo(id1, validIdsFile);
+    DatanodeDetails validId = ContainerUtils.readDatanodeDetailsFrom(
+        validIdsFile);
+
+    assertEquals(id1, validId);
+    assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage());
 
     // Write should fail if unable to create file or directory
     File invalidPath = new File(WRITE_TMP, "an/invalid/path");
     try {
-      ContainerUtils.writeDatanodeIDTo(id1, invalidPath);
+      ContainerUtils.writeDatanodeDetailsTo(id1, invalidPath);
+      Assert.fail();
     } catch (Exception e) {
-      e.printStackTrace();
       assertTrue(e instanceof IOException);
     }
 
     // Read should return an empty value if file doesn't exist
     File nonExistFile = new File(READ_TMP, "non_exist.id");
     nonExistFile.delete();
-    List<DatanodeID> emptyIDs =
-        ContainerUtils.readDatanodeIDsFrom(nonExistFile);
-    assertTrue(emptyIDs.isEmpty());
+    try {
+      ContainerUtils.readDatanodeDetailsFrom(nonExistFile);
+      Assert.fail();
+    } catch (Exception e) {
+      assertTrue(e instanceof IOException);
+    }
 
     // Read should fail if the file is malformed
     File malformedFile = new File(READ_TMP, "malformed.id");
     createMalformedIDFile(malformedFile);
     try {
-      ContainerUtils.readDatanodeIDsFrom(malformedFile);
+      ContainerUtils.readDatanodeDetailsFrom(malformedFile);
       fail("Read a malformed ID file should fail");
     } catch (Exception e) {
       assertTrue(e instanceof IOException);
@@ -202,11 +182,11 @@ public class TestMiniOzoneCluster {
         true);
     try (
         DatanodeStateMachine sm1 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm2 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm3 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
       assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -225,11 +205,11 @@ public class TestMiniOzoneCluster {
     ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
     try (
         DatanodeStateMachine sm1 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm2 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf);
         DatanodeStateMachine sm3 = new DatanodeStateMachine(
-            DFSTestUtil.getLocalDatanodeID(), ozoneConf);
+            TestUtils.getDatanodeDetails(), ozoneConf)
     ) {
       HashSet<Integer> ports = new HashSet<Integer>();
       assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -244,8 +224,8 @@ public class TestMiniOzoneCluster {
   private void createMalformedIDFile(File malformedFile)
       throws IOException{
     malformedFile.delete();
-    DatanodeID id1 = DFSTestUtil.getLocalDatanodeID(1);
-    ContainerUtils.writeDatanodeIDTo(id1, malformedFile);
+    DatanodeDetails id = TestUtils.getDatanodeDetails();
+    ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
 
     FileOutputStream out = new FileOutputStream(malformedFile);
     out.write("malformed".getBytes());
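
A small aside on the pattern above: the test now discovers the HDSL identity
by scanning the datanode's service plugins instead of calling
DataNode#getDatanodeId(). A minimal sketch of that lookup as a reusable
helper follows; the package of HdslDatanodeService and its registration on
the plugin list are assumptions taken from the test, not verified here.

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
    import org.apache.hadoop.ozone.HdslDatanodeService; // package assumed
    import org.apache.hadoop.util.ServicePlugin;

    public final class DatanodeDetailsLookup {
      private DatanodeDetailsLookup() {
      }

      /** Returns the DatanodeDetails published by the HDSL plugin, or null. */
      public static DatanodeDetails find(DataNode dn) {
        // Scan the datanode's registered service plugins for the HDSL one.
        for (ServicePlugin plugin : dn.getPlugins()) {
          if (plugin instanceof HdslDatanodeService) {
            return ((HdslDatanodeService) plugin).getDatanodeDetails();
          }
        }
        return null; // no HDSL plugin registered on this datanode
      }
    }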

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0033258..b4c94bd 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -309,7 +309,8 @@ public class TestStorageContainerManager {
       ReportState reportState = ReportState.newBuilder()
           .setState(ReportState.states.noContainerReports).setCount(0).build();
       List<SCMCommand> commands = nodeManager.sendHeartbeat(
-          nodeManager.getNodes(NodeState.HEALTHY).get(0), null, reportState);
+          nodeManager.getNodes(NodeState.HEALTHY).get(0).getProtoBufMessage(),
+          null, reportState);
 
       if (commands != null) {
         for (SCMCommand cmd : commands) {
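
The heartbeat change above is easy to miss: sendHeartbeat no longer takes
the node object itself but its protobuf form. A hedged sketch of the call
shape, assuming getNodes(...) now yields DatanodeDetails (as the chained
getProtoBufMessage() call suggests) and that nodeManager and reportState
are set up as in the surrounding test:

    // Heartbeat the first healthy node using its protobuf identity.
    DatanodeDetails healthy = nodeManager.getNodes(NodeState.HEALTHY).get(0);
    List<SCMCommand> commands = nodeManager.sendHeartbeat(
        healthy.getProtoBufMessage(), null, reportState);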

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 0e0dc0c..e80e473 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -23,9 +23,9 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -159,18 +159,19 @@ public class TestStorageContainerManagerHelper {
       throws IOException {
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getContainer(containerName);
-    DatanodeID leadDN = pipeline.getLeader();
+    DatanodeDetails leadDN = pipeline.getLeader();
     OzoneContainer containerServer =
-        getContainerServerByDatanodeID(leadDN.getDatanodeUuid());
+        getContainerServerByDatanodeUuid(leadDN.getUuidString());
     ContainerData containerData = containerServer.getContainerManager()
         .readContainer(containerName);
     return KeyUtils.getDB(containerData, conf);
   }
 
-  private OzoneContainer getContainerServerByDatanodeID(String dnUUID)
+  private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
       throws IOException {
     for (DataNode dn : cluster.getDataNodes()) {
-      if (dn.getDatanodeId().getDatanodeUuid().equals(dnUUID)) {
+      if (MiniOzoneClassicCluster.getDatanodeDetails(dn).getUuidString()
+          .equals(dnUUID)) {
         return MiniOzoneTestHelper.getOzoneContainer(dn);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 1575309..3e5db29 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos
     .ContainerCommandRequestProto;
@@ -30,7 +31,6 @@ import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType;
 import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -89,15 +89,22 @@ public final class ContainerTestHelper {
       return "127.0.0.1:" + s.getLocalPort();
     }
   }
-  public static DatanodeID createDatanodeID() throws IOException {
+  public static DatanodeDetails createDatanodeDetails() throws IOException {
     ServerSocket socket = new ServerSocket(0);
     int port = socket.getLocalPort();
-    DatanodeID datanodeID = new DatanodeID(socket.getInetAddress()
-        .getHostAddress(), socket.getInetAddress().getHostName(),
-        UUID.randomUUID().toString(), port, port, port, port);
-    datanodeID.setContainerPort(port);
+    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setIpAddress(socket.getInetAddress().getHostAddress())
+        .setHostName(socket.getInetAddress().getHostName())
+        .setInfoPort(port)
+        .setInfoSecurePort(port)
+        .setContainerPort(port)
+        .setRatisPort(port)
+        .setOzoneRestPort(port)
+        .build();
+
     socket.close();
-    return datanodeID;
+    return datanodeDetails;
   }
 
   /**
@@ -109,23 +116,23 @@ public final class ContainerTestHelper {
   public static Pipeline createPipeline(String containerName, int numNodes)
       throws IOException {
     Preconditions.checkArgument(numNodes >= 1);
-    final List<DatanodeID> ids = new ArrayList<>(numNodes);
+    final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
     for(int i = 0; i < numNodes; i++) {
-      ids.add(createDatanodeID());
+      ids.add(createDatanodeDetails());
     }
     return createPipeline(containerName, ids);
   }
 
   public static Pipeline createPipeline(
-      String containerName, Iterable<DatanodeID> ids)
+      String containerName, Iterable<DatanodeDetails> ids)
       throws IOException {
     Objects.requireNonNull(ids, "ids == null");
-    final Iterator<DatanodeID> i = ids.iterator();
+    final Iterator<DatanodeDetails> i = ids.iterator();
     Preconditions.checkArgument(i.hasNext());
-    final DatanodeID leader = i.next();
+    final DatanodeDetails leader = i.next();
     String pipelineName = "TEST-" + UUID.randomUUID().toString().substring(3);
     final PipelineChannel pipelineChannel =
-        new PipelineChannel(leader.getDatanodeUuid(), LifeCycleState.OPEN,
+        new PipelineChannel(leader.getUuidString(), LifeCycleState.OPEN,
             ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineName);
     pipelineChannel.addMember(leader);
     for(; i.hasNext();) {
@@ -213,7 +220,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.WriteChunk);
     request.setWriteChunk(writeRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(newPipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(newPipeline.getLeader().getUuidString());
 
     return request.build();
   }
@@ -259,7 +266,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.PutSmallFile);
     request.setPutSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(newPipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(newPipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -278,7 +285,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.GetSmallFile);
     request.setGetSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -309,7 +316,7 @@ public final class ContainerTestHelper {
     newRequest.setCmdType(ContainerProtos.Type.ReadChunk);
     newRequest.setReadChunk(readRequest);
     newRequest.setTraceID(UUID.randomUUID().toString());
-    newRequest.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    newRequest.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return newRequest.build();
   }
 
@@ -341,7 +348,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.DeleteChunk);
     request.setDeleteChunk(deleteRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -371,7 +378,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.CreateContainer);
     request.setCreateContainer(createRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid().toString());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
 
     return request.build();
   }
@@ -410,7 +417,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.UpdateContainer);
     request.setUpdateContainer(updateRequestBuilder.build());
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
   /**
@@ -461,7 +468,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.PutKey);
     request.setPutKey(putRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -491,7 +498,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.GetKey);
     request.setGetKey(getRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -529,7 +536,7 @@ public final class ContainerTestHelper {
     request.setCmdType(ContainerProtos.Type.DeleteKey);
     request.setDeleteKey(delRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeID(pipeline.getLeader().getDatanodeUuid());
+    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
@@ -548,7 +555,7 @@ public final class ContainerTestHelper {
         ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
             .Type.CloseContainer).setCloseContainer(closeRequest)
             .setTraceID(UUID.randomUUID().toString())
-            .setDatanodeID(pipeline.getLeader().getDatanodeUuid())
+            .setDatanodeUuid(pipeline.getLeader().getUuidString())
             .build();
 
     return cmd;
@@ -568,7 +575,8 @@ public final class ContainerTestHelper {
     ContainerProtos.ContainerCommandRequestProto cmd =
             ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
                     .Type.CloseContainer).setCloseContainer(closeRequest)
-                    .setDatanodeID(pipeline.getLeader().getDatanodeUuid())
+                    .setDatanodeUuid(
+                        pipeline.getLeader().getUuidString())
                     .build();
     return cmd;
   }
@@ -589,7 +597,7 @@ public final class ContainerTestHelper {
         .setCmdType(ContainerProtos.Type.DeleteContainer)
         .setDeleteContainer(deleteRequest)
         .setTraceID(UUID.randomUUID().toString())
-        .setDatanodeID(pipeline.getLeader().getDatanodeUuid())
+        .setDatanodeUuid(pipeline.getLeader().getUuidString())
         .build();
   }
 }
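
The multi-argument DatanodeID constructor above gives way to a fluent
builder. One refinement worth considering when reusing
createDatanodeDetails(): try-with-resources closes the probe socket even if
the builder throws. A sketch under that assumption (same builder calls as
in the diff, only the socket handling differs):

    import java.io.IOException;
    import java.net.ServerSocket;
    import java.util.UUID;
    import org.apache.hadoop.hdsl.protocol.DatanodeDetails;

    // Build a DatanodeDetails bound to an ephemeral local port.
    public static DatanodeDetails localDatanodeDetails() throws IOException {
      try (ServerSocket socket = new ServerSocket(0)) {
        int port = socket.getLocalPort();
        return DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setIpAddress(socket.getInetAddress().getHostAddress())
            .setHostName(socket.getInetAddress().getHostName())
            .setInfoPort(port)
            .setInfoSecurePort(port)
            .setContainerPort(port)
            .setRatisPort(port)
            .setOzoneRestPort(port)
            .build();
      } // socket closed here, whether or not the builder succeeded
    }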

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index f2b3838..41b5a88 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -35,6 +34,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
 import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -115,7 +115,7 @@ public class TestBlockDeletingService {
     ContainerManager containerManager = new ContainerManagerImpl();
     List<StorageLocation> pathLists = new LinkedList<>();
     pathLists.add(StorageLocation.parse(containersDir.getAbsolutePath()));
-    containerManager.init(conf, pathLists, DFSTestUtil.getLocalDatanodeID());
+    containerManager.init(conf, pathLists, TestUtils.getDatanodeDetails());
     return containerManager;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 81c44a2..cb61da3 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -28,15 +28,15 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -89,7 +89,7 @@ public class TestContainerDeletionChoosingPolicy {
     List<StorageLocation> pathLists = new LinkedList<>();
     pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
     containerManager = new ContainerManagerImpl();
-    containerManager.init(conf, pathLists, DFSTestUtil.getLocalDatanodeID());
+    containerManager.init(conf, pathLists, TestUtils.getDatanodeDetails());
 
     int numContainers = 10;
     for (int i = 0; i < numContainers; i++) {
@@ -135,8 +135,8 @@ public class TestContainerDeletionChoosingPolicy {
     List<StorageLocation> pathLists = new LinkedList<>();
     pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
     containerManager = new ContainerManagerImpl();
-    DatanodeID datanodeID = DFSTestUtil.getLocalDatanodeID();
-    containerManager.init(conf, pathLists, datanodeID);
+    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+    containerManager.init(conf, pathLists, datanodeDetails);
 
     int numContainers = 10;
     Random random = new Random();
@@ -172,7 +172,7 @@ public class TestContainerDeletionChoosingPolicy {
     containerManager.writeLock();
     containerManager.shutdown();
     containerManager.writeUnlock();
-    containerManager.init(conf, pathLists, datanodeID);
+    containerManager.init(conf, pathLists, datanodeDetails);
 
     List<ContainerData> result0 = containerManager
         .chooseContainerForBlockDeletion(5);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 34f45e1..fb44270 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -21,12 +21,12 @@ import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -153,7 +153,7 @@ public class TestContainerPersistence {
       FileUtils.forceMkdir(new File(location.getNormalizedUri()));
     }
 
-    containerManager.init(conf, pathLists, DFSTestUtil.getLocalDatanodeID());
+    containerManager.init(conf, pathLists, TestUtils.getDatanodeDetails());
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index d1690c9..e2ca7f2 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -85,9 +86,11 @@ public class TestCloseContainerHandler {
 
     Assert.assertFalse(isContainerClosed(cluster, containerName));
 
+    DatanodeDetails datanodeDetails = MiniOzoneClassicCluster
+        .getDatanodeDetails(cluster.getDataNodes().get(0));
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(cluster.getDataNodes().get(0).getDatanodeId(),
+        .addDatanodeCommand(datanodeDetails.getUuid(),
             new CloseContainerCommand(containerName));
 
     GenericTestUtils.waitFor(() -> isContainerClosed(cluster, containerName),
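
Note the two accessors in play here and throughout the commit: getUuid()
(used above to address the datanode command) versus getUuidString() (used
where pipeline code wants a String). The sketch below assumes, without
verifying the API, that the former is the java.util.UUID behind the latter:

    DatanodeDetails dd = TestUtils.getDatanodeDetails();
    UUID uuid = dd.getUuid();          // assumed: java.util.UUID
    String s = dd.getUuidString();     // assumed: uuid.toString()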

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 34101c6..82eb222 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -24,6 +24,7 @@ import static org.mockito.Mockito.mock;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.hdsl.protocol.DatanodeDetails;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -35,6 +36,7 @@ import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -79,7 +81,8 @@ public class TestContainerMetrics {
 
       Dispatcher dispatcher = new Dispatcher(containerManager, conf);
       dispatcher.init();
-      server = new XceiverServer(conf, dispatcher);
+      DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+      server = new XceiverServer(datanodeDetails, conf, dispatcher);
       client = new XceiverClient(pipeline, conf);
 
       server.start();
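
XceiverServer now requires the hosting datanode's identity at construction
time. A minimal wiring sketch, assuming the dispatcher setup shown in the
test above and that stop() is the matching shutdown call:

    // Stand up a container RPC server bound to a test datanode identity.
    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
    Dispatcher dispatcher = new Dispatcher(containerManager, conf);
    dispatcher.init();
    XceiverServer server = new XceiverServer(datanodeDetails, conf, dispatcher);
    server.start();
    // ... exercise the server through an XceiverClient ...
    server.stop();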

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 4b950bd..251ac3a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -26,6 +25,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.scm.TestUtils;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.scm.XceiverClientSpi;
@@ -68,8 +68,7 @@ public class TestOzoneContainer {
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           pipeline.getLeader().getContainerPort());
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-      container = new OzoneContainer(DFSTestUtil.getLocalDatanodeID(1),
-          conf);
+      container = new OzoneContainer(TestUtils.getDatanodeDetails(), conf);
       container.start();
 
       XceiverClient client = new XceiverClient(pipeline, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3440ca6e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
index 913dad2..1edfbd3 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
@@ -87,7 +87,8 @@ public class TestOzoneContainerRatis {
       final List<DataNode> datanodes = cluster.getDataNodes();
       final Pipeline pipeline = ContainerTestHelper.createPipeline(
           containerName,
-          CollectionUtils.as(datanodes, DataNode::getDatanodeId));
+          CollectionUtils.as(datanodes,
+              MiniOzoneClassicCluster::getDatanodeDetails));
       LOG.info("pipeline=" + pipeline);
 
       // Create Ratis cluster
@@ -97,7 +98,7 @@ public class TestOzoneContainerRatis {
 //      LOG.info("Created RatisCluster " + ratisId);
 //
 //      // check Ratis cluster members
-//      final List<DatanodeID> dns = manager.getMembers(ratisId);
+//      final List<DatanodeDetails> dns = manager.getMembers(ratisId);
 //      Assert.assertEquals(pipeline.getMachines(), dns);
 //
 //      // run test

