This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 3aae672576 HDDS-9678. Topology level is not set in datanode object for distance calculation (#5588)
3aae672576 is described below

commit 3aae6725764700ed7db2268b47f5867bbbabebc3
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Nov 14 17:16:27 2023 +0100

    HDDS-9678. Topology level is not set in datanode object for distance calculation (#5588)
---
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  1 +
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   | 22 ++++---
 .../scm/server/TestSCMBlockProtocolServer.java     | 70 +++++++++++++++++++---
 3 files changed, 78 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 37446241e4..fe79f27384 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -141,6 +141,7 @@ public class DatanodeDetails extends NodeImpl implements
 
   public DatanodeDetails(DatanodeDetails datanodeDetails) {
     super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(),
+        datanodeDetails.getParent(), datanodeDetails.getLevel(),
         datanodeDetails.getCost());
     this.uuid = datanodeDetails.uuid;
     this.uuidString = uuid.toString();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index ef8bacddad..1cc10bb87c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -695,7 +695,7 @@ public class NetworkTopologyImpl implements NetworkTopology {
    */
   @Override
   public int getDistanceCost(Node node1, Node node2) {
-    if ((node1 != null && node2 != null && node1.equals(node2)) ||
+    if ((node1 != null && node1.equals(node2)) ||
         (node1 == null && node2 == null))  {
       return 0;
     }
@@ -703,19 +703,25 @@ public class NetworkTopologyImpl implements NetworkTopology {
       LOG.warn("One of the nodes is a null pointer");
       return Integer.MAX_VALUE;
     }
+
+    // verify levels are in range
+    int level1 = node1.getLevel();
+    int level2 = node2.getLevel();
+    if (level1 < NetConstants.ROOT_LEVEL || level2 < NetConstants.ROOT_LEVEL) {
+      return Integer.MAX_VALUE;
+    }
+    if (level1 > maxLevel || level2 > maxLevel) {
+      return Integer.MAX_VALUE;
+    }
+
     int cost = 0;
     netlock.readLock().lock();
     try {
-      if ((node1.getAncestor(maxLevel - 1) != clusterTree) ||
-          (node2.getAncestor(maxLevel - 1) != clusterTree)) {
+      if ((node1.getAncestor(level1 - 1) != clusterTree) ||
+          (node2.getAncestor(level2 - 1) != clusterTree)) {
         LOG.debug("One of the nodes is outside of network topology");
         return Integer.MAX_VALUE;
       }
-      int level1 = node1.getLevel();
-      int level2 = node2.getLevel();
-      if (level1 > maxLevel || level2 > maxLevel) {
-        return Integer.MAX_VALUE;
-      }
       while (level1 > level2 && node1 != null) {
         node1 = node1.getParent();
         level1--;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 31d9b20358..b25a517de7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -18,15 +18,18 @@
 
 package org.apache.hadoop.hdds.scm.server;
 
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.net.NodeImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 
@@ -41,8 +44,11 @@ import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_LEVEL;
 
 /**
  * Test class for @{@link SCMBlockProtocolServer}.
@@ -58,18 +64,30 @@ public class TestSCMBlockProtocolServer {
   @BeforeEach
   void setUp(@TempDir File dir) throws Exception {
     config = SCMTestUtils.getConf(dir);
+    config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        StaticMapping.class.getName());
+    List<DatanodeDetails> datanodes = new ArrayList<>(NODE_COUNT);
+    List<String> nodeMapping = new ArrayList<>(NODE_COUNT);
+    for (int i = 0; i < NODE_COUNT; i++) {
+      DatanodeDetails dn = randomDatanodeDetails();
+      final String rack = "/rack" + (i % 2);
+      nodeMapping.add(dn.getHostName() + "=" + rack);
+      nodeMapping.add(dn.getIpAddress() + "=" + rack);
+      datanodes.add(dn);
+    }
+    config.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING,
+        String.join(",", nodeMapping));
+
     SCMConfigurator configurator = new SCMConfigurator();
     configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true));
     configurator.setScmContext(SCMContext.emptyContext());
+
     scm = HddsTestUtils.getScm(config, configurator);
     scm.start();
     scm.exitSafeMode();
     // add nodes to scm node manager
     nodeManager = scm.getScmNodeManager();
-    for (int i = 0; i < NODE_COUNT; i++) {
-      nodeManager.register(randomDatanodeDetails(), null, null);
-
-    }
+    datanodes.forEach(dn -> nodeManager.register(dn, null, null));
     server = scm.getBlockProtocolServer();
     service = new ScmBlockLocationProtocolServerSideTranslatorPB(server, scm,
         Mockito.mock(ProtocolMessageMetrics.class));
@@ -83,11 +101,36 @@ public class TestSCMBlockProtocolServer {
     }
   }
 
+  @Test
+  void sortDatanodesRelativeToDatanode() {
+    List<String> nodes = getNetworkNames();
+    for (DatanodeDetails dn : nodeManager.getAllNodes()) {
+      Assertions.assertEquals(ROOT_LEVEL + 2, dn.getLevel());
+
+      List<DatanodeDetails> sorted =
+          server.sortDatanodes(nodes, nodeAddress(dn));
+
+      Assertions.assertEquals(dn, sorted.get(0),
+          "Source node should be sorted very first");
+
+      for (int i = 1; i < NODE_COUNT / 2; i++) {
+        DatanodeDetails item = sorted.get(i);
+        Assertions.assertEquals(dn.getNetworkLocation(),
+            item.getNetworkLocation(),
+            "Nodes in the same rack should be sorted first");
+      }
+      for (int i = NODE_COUNT / 2; i < NODE_COUNT; i++) {
+        DatanodeDetails item = sorted.get(i);
+        Assertions.assertNotEquals(dn.getNetworkLocation(),
+            item.getNetworkLocation(),
+            "Nodes in the other rack should be sorted last");
+      }
+    }
+  }
+
   @Test
   public void testSortDatanodes() throws Exception {
-    List<String> nodes = new ArrayList();
-    nodeManager.getAllNodes().stream().forEach(
-        node -> nodes.add(node.getNetworkName()));
+    List<String> nodes = getNetworkNames();
 
     // sort normal datanodes
     String client;
@@ -145,4 +188,17 @@ public class TestSCMBlockProtocolServer {
     resp.getNodeList().stream().forEach(
         node -> System.out.println(node.getNetworkName()));
   }
+
+  private List<String> getNetworkNames() {
+    return nodeManager.getAllNodes().stream()
+        .map(NodeImpl::getNetworkName)
+        .collect(Collectors.toList());
+  }
+
+  private String nodeAddress(DatanodeDetails dn) {
+    boolean useHostname = config.getBoolean(
+        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
+    return useHostname ? dn.getHostName() : dn.getIpAddress();
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to