[ https://issues.apache.org/jira/browse/HDFS-16188?focusedWorklogId=645148&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-645148 ]

ASF GitHub Bot logged work on HDFS-16188:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 01/Sep/21 09:43
            Start Date: 01/Sep/21 09:43
    Worklog Time Spent: 10m 
      Work Description: goiri commented on a change in pull request #3346:
URL: https://github.com/apache/hadoop/pull/3346#discussion_r699774160



##########
File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
##########
@@ -113,47 +117,93 @@ public NamenodeHeartbeatService(
 
   }
 
+  /**
+   * Create a new Namenode status updater.
+   *
+   * @param resolver      Namenode resolver service to handle NN registration.
+   * @param nsId          Identifier of the nameservice.
+   * @param nnId          Identifier of the namenode in HA.
+   * @param resolvedHost  Resolved hostname for this specific namenode.
+   */
+  public NamenodeHeartbeatService(
+      ActiveNamenodeResolver resolver, String nsId, String nnId,
+      String resolvedHost) {
+    super(getNnHeartBeatServiceName(nsId, nnId));
+
+    this.resolver = resolver;
+
+    this.nameserviceId = nsId;
+    // Concatenate a unique id from the original nnId and resolvedHost.
+    this.namenodeId = nnId + "-" + resolvedHost;
+    this.resolvedHost = resolvedHost;
+    // Save the original nnId to get the ports from config.
+    this.originalNnId = nnId;
+
+  }
+
   @Override
   protected void serviceInit(Configuration configuration) throws Exception {
 
     this.conf = DFSHAAdmin.addSecurityConfiguration(configuration);
 
     String nnDesc = nameserviceId;
     if (this.namenodeId != null && !this.namenodeId.isEmpty()) {
-      this.localTarget = new NNHAServiceTarget(
-          conf, nameserviceId, namenodeId);
       nnDesc += "-" + namenodeId;
     } else {
       this.localTarget = null;
     }
 
+    if (originalNnId == null) {
+      originalNnId = namenodeId;
+    }
+
     // Get the RPC address for the clients to connect
-    this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId);
+    this.rpcAddress = getRpcAddress(conf, nameserviceId, originalNnId);
+    if (resolvedHost != null) {
+      rpcAddress = resolvedHost + ":"
+          + NetUtils.createSocketAddr(rpcAddress).getPort();
+    }
     LOG.info("{} RPC address: {}", nnDesc, rpcAddress);
 
     // Get the Service RPC address for monitoring
     this.serviceAddress =
-        DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId);
+        DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, originalNnId);
     if (this.serviceAddress == null) {
       LOG.error("Cannot locate RPC service address for NN {}, " +
           "using RPC address {}", nnDesc, this.rpcAddress);
       this.serviceAddress = this.rpcAddress;
     }
+    if (resolvedHost != null) {

Review comment:
       We do the same thing over and over for the lifeline and the others.
   Maybe do all of them in a single shot?
   The way to extract the port might also be expensive, to be honest;
   creating a socket address is usually bad.
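
   A minimal sketch of that suggestion (the helper name and the string-split
   are assumptions, not part of the patch): swap the host once in a small
   utility and reuse it for the RPC, service RPC, and lifeline addresses,
   taking the port from the "host:port" string directly instead of building
   an InetSocketAddress each time.

       // Hypothetical helper; the addresses handled here are plain
       // "host:port" strings, so a string split avoids the cost of
       // NetUtils.createSocketAddr().
       private static String replaceHost(String address, String resolvedHost) {
         if (address == null || resolvedHost == null) {
           return address;
         }
         int sep = address.lastIndexOf(':');
         if (sep < 0) {
           return resolvedHost; // no port suffix to preserve
         }
         return resolvedHost + address.substring(sep); // keeps ":port"
       }

   Applied in a single pass: rpcAddress = replaceHost(rpcAddress,
   resolvedHost), and likewise for serviceAddress and lifelineAddress.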

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
     Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
-      String suffix = concatSuffixes(nsId, nnId);
-      String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
-      if (address != null) {
-        InetSocketAddress isa = NetUtils.createSocketAddr(address);
-        try {
-          // Datanode should just use FQDN
-          String[] resolvedHostNames = dnr
-              .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
-          int port = isa.getPort();
-          for (String hostname : resolvedHostNames) {
-            InetSocketAddress inetSocketAddress = new InetSocketAddress(
-                hostname, port);
-            // Concat nn info with host info to make uniq ID
-            String concatId;
-            if (nnId == null || nnId.isEmpty()) {
-              concatId = String
-                  .join("-", nsId, hostname, String.valueOf(port));
-            } else {
-              concatId = String
-                  .join("-", nsId, nnId, hostname, String.valueOf(port));
-            }
-            ret.put(concatId, inetSocketAddress);
-          }
-        } catch (UnknownHostException e) {
-          LOG.error("Failed to resolve address: " + address);
+      ret.putAll(getResolvedAddressesForNnId(
+          conf, nsId, nnId, dnr, defaultValue, keys));
+    }
+    return ret;
+  }
+
+  public static Map<String, InetSocketAddress> getResolvedAddressesForNnId(
+      Configuration conf, String nsId, String nnId,
+      DomainNameResolver dnr, String defaultValue,
+      String... keys) {
+    String suffix = concatSuffixes(nsId, nnId);
+    String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
+    Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
+    if (address != null) {
+      InetSocketAddress isa = NetUtils.createSocketAddr(address);
+      try {
+        String[] resolvedHostNames = dnr
+            .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
+        int port = isa.getPort();
+        for (String hostname : resolvedHostNames) {
+          InetSocketAddress inetSocketAddress = new InetSocketAddress(
+              hostname, port);
+          // Concatenate nn info with host info to make a unique ID
+          String concatId = getConcatNnId(nsId, nnId, hostname, port);
+          ret.put(concatId, inetSocketAddress);
         }
+      } catch (UnknownHostException e) {
+        LOG.error("Failed to resolve address: {}", address);
       }
     }
     return ret;
   }
 
+  /**
+   * Concatenate nn info with host info to make a unique ID.
+   * This is mainly used when the configured nn is
+   * a domain record that has multiple hosts behind it.
+   */
+  static String getConcatNnId(String nsId, String nnId, String hostname,
+      int port) {

Review comment:
       Add the args to the javadoc too.
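
   For illustration, the completed javadoc could read (tag wording is only
   a suggestion, derived from the visible method body):

       /**
        * Concatenate nn info with host info to make a unique ID.
        * This is mainly used when the configured nn is
        * a domain record that has multiple hosts behind it.
        *
        * @param nsId     Identifier of the nameservice.
        * @param nnId     Identifier of the namenode; may be null or empty.
        * @param hostname One resolved host behind the domain record.
        * @param port     Port taken from the configured address.
        * @return "nsId-hostname-port" when nnId is absent, otherwise
        *         "nsId-nnId-hostname-port".
        */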

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
     Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
-      String suffix = concatSuffixes(nsId, nnId);
-      String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
-      if (address != null) {
-        InetSocketAddress isa = NetUtils.createSocketAddr(address);
-        try {
-          // Datanode should just use FQDN
-          String[] resolvedHostNames = dnr
-              .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
-          int port = isa.getPort();
-          for (String hostname : resolvedHostNames) {
-            InetSocketAddress inetSocketAddress = new InetSocketAddress(
-                hostname, port);
-            // Concat nn info with host info to make uniq ID
-            String concatId;
-            if (nnId == null || nnId.isEmpty()) {
-              concatId = String
-                  .join("-", nsId, hostname, String.valueOf(port));
-            } else {
-              concatId = String
-                  .join("-", nsId, nnId, hostname, String.valueOf(port));
-            }
-            ret.put(concatId, inetSocketAddress);
-          }
-        } catch (UnknownHostException e) {
-          LOG.error("Failed to resolve address: " + address);
+      ret.putAll(getResolvedAddressesForNnId(

Review comment:
       I would extract the output of the method before doing putAll().
   If something breaks, it is easier to debug if it points to the exact line 
too.
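
   Roughly (the local variable name is illustrative):

       // A dedicated line for the call makes a stack trace unambiguous about
       // whether the resolution or the putAll() failed.
       Map<String, InetSocketAddress> resolved = getResolvedAddressesForNnId(
           conf, nsId, nnId, dnr, defaultValue, keys);
       ret.putAll(resolved);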

##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
     Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
-      String suffix = concatSuffixes(nsId, nnId);
-      String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
-      if (address != null) {
-        InetSocketAddress isa = NetUtils.createSocketAddr(address);
-        try {
-          // Datanode should just use FQDN
-          String[] resolvedHostNames = dnr
-              .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
-          int port = isa.getPort();
-          for (String hostname : resolvedHostNames) {
-            InetSocketAddress inetSocketAddress = new InetSocketAddress(
-                hostname, port);
-            // Concat nn info with host info to make uniq ID
-            String concatId;
-            if (nnId == null || nnId.isEmpty()) {
-              concatId = String
-                  .join("-", nsId, hostname, String.valueOf(port));
-            } else {
-              concatId = String
-                  .join("-", nsId, nnId, hostname, String.valueOf(port));
-            }
-            ret.put(concatId, inetSocketAddress);
-          }
-        } catch (UnknownHostException e) {
-          LOG.error("Failed to resolve address: " + address);
+      ret.putAll(getResolvedAddressesForNnId(
+          conf, nsId, nnId, dnr, defaultValue, keys));
+    }
+    return ret;
+  }
+
+  public static Map<String, InetSocketAddress> getResolvedAddressesForNnId(
+      Configuration conf, String nsId, String nnId,
+      DomainNameResolver dnr, String defaultValue,
+      String... keys) {
+    String suffix = concatSuffixes(nsId, nnId);
+    String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
+    Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
+    if (address != null) {
+      InetSocketAddress isa = NetUtils.createSocketAddr(address);
+      try {
+        String[] resolvedHostNames = dnr
+            .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
+        int port = isa.getPort();
+        for (String hostname : resolvedHostNames) {
+          InetSocketAddress inetSocketAddress = new InetSocketAddress(
+              hostname, port);
+          // Concatenate nn info with host info to make a unique ID
+          String concatId = getConcatNnId(nsId, nnId, hostname, port);
+          ret.put(concatId, inetSocketAddress);
         }
+      } catch (UnknownHostException e) {
+        LOG.error("Failed to resolve address: {}", address);
       }
     }
     return ret;
   }
 
+  /**
+   * Concatenate nn info with host info to make a unique ID.
+   * This is mainly used when the configured nn is
+   * a domain record that has multiple hosts behind it.
+   */
+  static String getConcatNnId(String nsId, String nnId, String hostname,
+      int port) {

Review comment:
       private?
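
   That is, make it private unless a unit test calls it directly, in which
   case package-private plus @VisibleForTesting is the usual Hadoop idiom.
   Both options as a sketch:

       // Option 1: no callers outside the class.
       private static String getConcatNnId(String nsId, String nnId,
           String hostname, int port) {
         // ... body unchanged ...
       }

       // Option 2: tests need it; keep package-private and flag the intent.
       @VisibleForTesting
       static String getConcatNnId(String nsId, String nnId,
           String hostname, int port) {
         // ... body unchanged ...
       }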






Issue Time Tracking
-------------------

    Worklog Id:     (was: 645148)
    Time Spent: 2h 50m  (was: 2h 40m)

> RBF: Router to support resolving monitored namenodes with DNS
> -------------------------------------------------------------
>
>                 Key: HDFS-16188
>                 URL: https://issues.apache.org/jira/browse/HDFS-16188
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>          Components: rbf
>            Reporter: Leon Gao
>            Assignee: Leon Gao
>            Priority: Minor
>              Labels: pull-request-available
>          Time Spent: 2h 50m
>  Remaining Estimate: 0h
>
> We can use a DNS round-robin record to configure the list of monitored
> namenodes, so we don't have to reconfigure everything when a namenode
> hostname is changed. For example, in a containerized environment the
> hostname of namenodes/observers can change pretty often.
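
For context, the lookup this relies on is ordinary DNS fan-out: a single
round-robin A record resolves to every namenode/observer host behind it. A
self-contained illustration (the record name is made up):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class DnsFanout {
      public static void main(String[] args) throws UnknownHostException {
        // Hypothetical round-robin record with one A entry per namenode.
        for (InetAddress a : InetAddress.getAllByName("nn.cluster.example.com")) {
          // Reverse-resolve back to an FQDN, in the spirit of the patch's
          // getAllResolvedHostnameByDomainName() call.
          System.out.println(a.getCanonicalHostName() + " -> "
              + a.getHostAddress());
        }
      }
    }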


