[ https://issues.apache.org/jira/browse/HDFS-17361?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17815363#comment-17815363 ]

ASF GitHub Bot commented on HDFS-17361:
---------------------------------------

tasanuma commented on code in PR #6508:
URL: https://github.com/apache/hadoop/pull/6508#discussion_r1481790761


##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java:
##########
@@ -56,52 +66,72 @@ public QueryCommand(Configuration conf) {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"query plan\" command.");
+    TextStringBuilder result = new TextStringBuilder();
     Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
     verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
-    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
-    Preconditions.checkNotNull(nodeName);
-    nodeName = nodeName.trim();
-    String nodeAddress = nodeName;
-
-    // if the string is not name:port format use the default port.
-    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
-      int defaultIPC = NetUtils.createSocketAddr(
-          getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
-              DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
-      nodeAddress = nodeName + ":" + defaultIPC;
-      LOG.debug("Using default data node port :  {}", nodeAddress);
+    String nodeVal = cmd.getOptionValue(DiskBalancerCLI.QUERY);
+    Preconditions.checkNotNull(nodeVal);
+    nodeVal = nodeVal.trim();
+    Set<String> resultSet = new TreeSet<>();
+    String[] nodes = nodeVal.split(",");
+    if (nodes.length == 0) {
+      String warnMsg = "The number of input nodes is 0. "
+          + "Please input the valid nodes.";
+      throw new DiskBalancerException(warnMsg,
+          DiskBalancerException.Result.INVALID_NODE);
     }
 
-    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
-    try {
-      DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
-      System.out.printf("Plan File: %s%nPlan ID: %s%nResult: %s%n",
-              workStatus.getPlanFile(),
-              workStatus.getPlanID(),
-              workStatus.getResult().toString());
+    Collections.addAll(resultSet, nodes);
+    String outputLine = String.format(
+        "Get current status of the diskbalancer for DataNode(s). "
+            + "These DataNode(s) are parsed from '%s'.", nodeVal);
+    recordOutput(result, outputLine);
+    for (String nodeName : resultSet) {
+      // if the string is not name:port format use the default port.
+      String nodeAddress = nodeName;
+      if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
+        int defaultIPC = NetUtils.createSocketAddr(
+            getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+                DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
+        nodeAddress = nodeName + ":" + defaultIPC;
+        LOG.debug("Using default data node port :  {}", nodeAddress);
+      }
 
-      if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
-        System.out.printf("%s", workStatus.currentStateString());
+      ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
+      try {
+        DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
+        outputLine = String.format(
+            "DataNode: %s%nPlan File: %s%nPlan ID: %s%nResult: %s%n",
+            nodeAddress,
+            workStatus.getPlanFile(),
+            workStatus.getPlanID(),
+            workStatus.getResult().toString());
+        result.append(outputLine);
+        if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
+          outputLine = String.format("%s", workStatus.currentStateString());
+          result.append(outputLine);
+        }
+        result.append(System.lineSeparator());
+      } catch (DiskBalancerException ex) {
+        LOG.error("Query plan failed by {}", nodeAddress, ex);
+        throw ex;
       }
-    } catch (DiskBalancerException ex) {
-      LOG.error("Query plan failed.", ex);
-      throw ex;
     }
+    getPrintStream().println(result.toString());

Review Comment:
   ```suggestion
       getPrintStream().println(result);
   ```
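   (`PrintStream.println(Object)` already converts its argument via `String.valueOf`, which calls `toString()` on non-null values, so the explicit `toString()` is redundant.)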



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java:
##########
@@ -56,52 +66,72 @@ public QueryCommand(Configuration conf) {
   @Override
   public void execute(CommandLine cmd) throws Exception {
     LOG.info("Executing \"query plan\" command.");
+    TextStringBuilder result = new TextStringBuilder();
     Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
     verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
-    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
-    Preconditions.checkNotNull(nodeName);
-    nodeName = nodeName.trim();
-    String nodeAddress = nodeName;
-
-    // if the string is not name:port format use the default port.
-    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
-      int defaultIPC = NetUtils.createSocketAddr(
-          getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
-              DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
-      nodeAddress = nodeName + ":" + defaultIPC;
-      LOG.debug("Using default data node port :  {}", nodeAddress);
+    String nodeVal = cmd.getOptionValue(DiskBalancerCLI.QUERY);
+    Preconditions.checkNotNull(nodeVal);
+    nodeVal = nodeVal.trim();
+    Set<String> resultSet = new TreeSet<>();
+    String[] nodes = nodeVal.split(",");
+    if (nodes.length == 0) {

Review Comment:
   `nodeVal.split(",").length` cannot be 0 even if `nodeVal` is an empty string. So this if condition will never be true.
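
   A minimal standalone sketch of the behavior (the trailing `isEmpty()` guard is only an illustrative alternative check, not code from this PR):

   ```java
   public class SplitDemo {
     public static void main(String[] args) {
       // String.split returns the original string as a single element
       // when no delimiter matches, so "".split(",") has length 1, not 0.
       String[] nodes = "".split(",");
       System.out.println(nodes.length);          // prints 1
       System.out.println("'" + nodes[0] + "'");  // prints ''

       // Detecting empty input has to inspect the value itself,
       // for example:
       if (nodes.length == 1 && nodes[0].isEmpty()) {
         System.out.println("no nodes given");
       }
     }
   }
   ```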





> DiskBalancer: Query command support with multiple nodes
> -------------------------------------------------------
>
>                 Key: HDFS-17361
>                 URL: https://issues.apache.org/jira/browse/HDFS-17361
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>          Components: datanode, diskbalancer
>            Reporter: Haiyang Hu
>            Assignee: Haiyang Hu
>            Priority: Major
>              Labels: pull-request-available
>
> As mentioned in https://issues.apache.org/jira/browse/HDFS-10821, the query 
> command will support multiple nodes.
> That means we can use the command hdfs diskbalancer -query to print the 
> diskbalancer status of one or more datanodes.
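
A hypothetical invocation after this change (the node names are illustrative; 9867 is the default DataNode IPC port in recent Hadoop releases):

```
hdfs diskbalancer -query dn1.example.com:9867,dn2.example.com:9867
```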


