[ https://issues.apache.org/jira/browse/HDFS-16440?focusedWorklogId=724636&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-724636 ]

ASF GitHub Bot logged work on HDFS-16440:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 10/Feb/22 17:21
            Start Date: 10/Feb/22 17:21
    Worklog Time Spent: 10m 
      Work Description: goiri commented on a change in pull request #3971:
URL: https://github.com/apache/hadoop/pull/3971#discussion_r803917406



##########
File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
##########
@@ -211,6 +216,54 @@ public void testHearbeat() throws InterruptedException, IOException {
     assertEquals(NAMENODES[1], standby.getNamenodeId());
   }
 
+  @Test
+  public void testNamenodeHeartbeatServiceHAServiceProtocolProxy(){
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, -1, 1003,
+        "host01.test:1000", "host02.test:1000");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, -1, 1003,
+        "host01.test:1001", "host02.test:1001");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+  }
+
+  private void testNamenodeHeartbeatServiceHAServiceProtocol(
+      String nsId, String nnId,
+      int rpcPort, int servicePort,
+      int lifelinePort, int webAddressPort,
+      String expected1, String expected2) {
+    Configuration conf = generateNamenodeConfiguration(nsId, nnId,
+        rpcPort, servicePort, lifelinePort, webAddressPort);
+
+    Router testRouter = new Router();
+    testRouter.setConf(conf);
+
+    Collection<NamenodeHeartbeatService> heartbeatServices =
+        testRouter.createNamenodeHeartbeatServices();
+
+    assertEquals(2, heartbeatServices.size());
+
+    Iterator<NamenodeHeartbeatService> iterator = heartbeatServices.iterator();
+    NamenodeHeartbeatService service = iterator.next();
+    service.init(conf);
+    assertNotNull(service.getLocalTarget());
+    LOG.info("NamenodeHeartbeatService HealthMonitorAddress {}",
+        service.getLocalTarget().getHealthMonitorAddress().toString());

Review comment:
       If we are going to log this, we should probably extract it into a local variable first.
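
For illustration only, a minimal sketch of one way to read that suggestion (the healthMonitorAddr variable is hypothetical, not part of the patch; requires java.net.InetSocketAddress):

    // Hypothetical refactor: compute the address once and reuse it for both the log and the assert.
    InetSocketAddress healthMonitorAddr =
        service.getLocalTarget().getHealthMonitorAddress();
    LOG.info("NamenodeHeartbeatService HealthMonitorAddress {}", healthMonitorAddr);
    assertEquals(expected1, healthMonitorAddr.toString());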

##########
File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
##########
@@ -211,6 +216,54 @@ public void testHearbeat() throws InterruptedException, IOException {
     assertEquals(NAMENODES[1], standby.getNamenodeId());
   }
 
+  @Test
+  public void testNamenodeHeartbeatServiceHAServiceProtocolProxy(){
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, -1, 1003,
+        "host01.test:1000", "host02.test:1000");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, -1, 1003,
+        "host01.test:1001", "host02.test:1001");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+  }
+
+  private void testNamenodeHeartbeatServiceHAServiceProtocol(
+      String nsId, String nnId,
+      int rpcPort, int servicePort,
+      int lifelinePort, int webAddressPort,
+      String expected1, String expected2) {
+    Configuration conf = generateNamenodeConfiguration(nsId, nnId,
+        rpcPort, servicePort, lifelinePort, webAddressPort);
+
+    Router testRouter = new Router();
+    testRouter.setConf(conf);
+
+    Collection<NamenodeHeartbeatService> heartbeatServices =
+        testRouter.createNamenodeHeartbeatServices();
+
+    assertEquals(2, heartbeatServices.size());
+
+    Iterator<NamenodeHeartbeatService> iterator = heartbeatServices.iterator();
+    NamenodeHeartbeatService service = iterator.next();
+    service.init(conf);
+    assertNotNull(service.getLocalTarget());
+    LOG.info("NamenodeHeartbeatService HealthMonitorAddress {}",
+        service.getLocalTarget().getHealthMonitorAddress().toString());
+    assertEquals(expected1,
+        service.getLocalTarget().getHealthMonitorAddress().toString());
+
+    service = iterator.next();

Review comment:
       What if we have service0 and service1 to make it easier to follow?
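
For illustration only, a sketch of how the iteration could read with the suggested names (not code from the patch):

    // Hypothetical renaming: service0/service1 instead of reusing a single "service" variable.
    Iterator<NamenodeHeartbeatService> iterator = heartbeatServices.iterator();
    NamenodeHeartbeatService service0 = iterator.next();
    service0.init(conf);
    assertEquals(expected1,
        service0.getLocalTarget().getHealthMonitorAddress().toString());

    NamenodeHeartbeatService service1 = iterator.next();
    service1.init(conf);
    assertEquals(expected2,
        service1.getLocalTarget().getHealthMonitorAddress().toString());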





Issue Time Tracking
-------------------

    Worklog Id:     (was: 724636)
    Time Spent: 50m  (was: 40m)

> RBF: Support router get HAServiceStatus with Lifeline RPC address
> -----------------------------------------------------------------
>
>                 Key: HDFS-16440
>                 URL: https://issues.apache.org/jira/browse/HDFS-16440
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>          Components: rbf
>            Reporter: YulongZ
>            Priority: Minor
>              Labels: pull-request-available
>         Attachments: HDFS-16440.001.patch, HDFS-16440.003.patch
>
>          Time Spent: 50m
>  Remaining Estimate: 0h
>
> NamenodeHeartbeatService currently gets the HAServiceStatus using
> NNHAServiceTarget.getProxy. When a dedicated
> dfs.namenode.lifeline.rpc-address is configured, NamenodeHeartbeatService
> should get the HAServiceStatus using NNHAServiceTarget.getHealthMonitorProxy instead.
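
For context, a rough sketch of the behaviour described above (localTarget and conf come from the surrounding NamenodeHeartbeatService context, the timeout value is an arbitrary placeholder; this is not the actual patch):

    // Sketch only: fetch the HA status through the health-monitor proxy, which is
    // backed by dfs.namenode.lifeline.rpc-address when that address is configured.
    HAServiceProtocol proxy = localTarget.getHealthMonitorProxy(conf, 30000);
    HAServiceStatus status = proxy.getServiceStatus();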


