This is an automated email from the ASF dual-hosted git repository.

sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 6486be886c HDDS-10514. Recon - Provide DN decommissioning detailed status and info inline with current CLI command output. (#6376)
6486be886c is described below

commit 6486be886c04df9e025227780bd6512766d7da29
Author: Devesh Kumar Singh <[email protected]>
AuthorDate: Sun May 19 21:54:46 2024 +0530

    HDDS-10514. Recon - Provide DN decommissioning detailed status and info inline with current CLI command output. (#6376)
---
 .../hadoop/hdds/client/DecommissionUtils.java      | 153 +++++++++++++++++++++
 .../cli/datanode/DecommissionStatusSubCommand.java |  50 ++-----
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |   1 +
 .../hadoop/ozone/recon/api/NodeEndpoint.java       | 127 ++++++++++++++++-
 .../ozone/recon/api/types/DatanodeMetrics.java     |  81 +++++++++++
 .../api/types/DecommissionStatusInfoResponse.java  |  73 ++++++++++
 .../hadoop/ozone/recon/api/TestEndpoints.java      | 153 ++++++++++++++++++++-
 7 files changed, 593 insertions(+), 45 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java
new file mode 100644
index 0000000000..7d5b610b08
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Strings;
+import jakarta.annotation.Nullable;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Decommission specific stateless utility functions.
+ */
[email protected]
[email protected]
+public final class DecommissionUtils {
+
+
+  private static final Logger LOG = LoggerFactory.getLogger(DecommissionUtils.class);
+
+  private DecommissionUtils() {
+  }
+
+  /**
+   * Returns the decommissioning nodes matching the given uuid or ipAddress;
+   * if neither is set, all decommissioning nodes are returned.
+   *
+   * @param allNodes all datanodes which are in decommissioning status.
+   * @param uuid node uuid.
+   * @param ipAddress node ipAddress.
+   * @return the list of matching decommissioning nodes.
+   */
+  public static List<HddsProtos.Node> getDecommissioningNodesList(Stream<HddsProtos.Node> allNodes,
+                                                                  String uuid,
+                                                                  String ipAddress) {
+    List<HddsProtos.Node> decommissioningNodes;
+    if (!Strings.isNullOrEmpty(uuid)) {
+      decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid()
+          .equals(uuid)).collect(Collectors.toList());
+    } else if (!Strings.isNullOrEmpty(ipAddress)) {
+      decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress()
+          .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList());
+    } else {
+      decommissioningNodes = allNodes.collect(Collectors.toList());
+    }
+    return decommissioningNodes;
+  }
+
+  /**
+   * Returns the JSON node of datanode metrics.
+   *
+   * @param metricsJson metrics JSON returned by the SCM JMX endpoint.
+   * @return JSON node of datanode metrics.
+   * @throws IOException if the metrics JSON cannot be parsed.
+   */
+  public static JsonNode getBeansJsonNode(String metricsJson) throws IOException {
+    JsonNode jsonNode;
+    ObjectMapper objectMapper = new ObjectMapper();
+    JsonFactory factory = objectMapper.getFactory();
+    JsonParser parser = factory.createParser(metricsJson);
+    jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0);
+    return jsonNode;
+  }
+
+  /**
+   * Returns the number of decommissioning nodes.
+   *
+   * @param jsonNode JSON node of datanode metrics.
+   * @return the number of decommissioning nodes, or -1 if the total is absent.
+   */
+  public static int getNumDecomNodes(JsonNode jsonNode) {
+    int numDecomNodes;
+    JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal");
+    numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString()));
+    return numDecomNodes;
+  }
+
+  /**
+   * Returns the counts of the following info attributes:
+   *  - decommissionStartTime
+   *  - numOfUnclosedPipelines
+   *  - numOfUnderReplicatedContainers
+   *  - numOfUnclosedContainers
+   *
+   * @param datanode datanode details.
+   * @param counts JSON node of the decommission metrics bean.
+   * @param numDecomNodes number of decommissioning nodes.
+   * @param countsMap map to populate with the attributes above.
+   * @param errMsg error message to use when an expected metric is missing.
+   * @return the populated countsMap, or null if no entry matches the datanode.
+   * @throws IOException if an expected metric is missing for the datanode.
+   */
+  @Nullable
+  public static Map<String, Object> getCountsMap(DatanodeDetails datanode, JsonNode counts, int numDecomNodes,
+                                                 Map<String, Object> countsMap, String errMsg)
+      throws IOException {
+    for (int i = 1; i <= numDecomNodes; i++) {
+      if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) {
+        JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i);
+        JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i);
+        JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i);
+        JsonNode startTimeDN = counts.get("StartTimeDN." + i);
+        if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) {
+          throw new IOException(errMsg);
+        }
+
+        int pipelines = Integer.parseInt(pipelinesDN.toString());
+        double underReplicated = Double.parseDouble(underReplicatedDN.toString());
+        double unclosed = Double.parseDouble(unclosedDN.toString());
+        long startTime = Long.parseLong(startTimeDN.toString());
+        Date date = new Date(startTime);
+        DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
+        countsMap.put("decommissionStartTime", formatter.format(date));
+        countsMap.put("numOfUnclosedPipelines", pipelines);
+        countsMap.put("numOfUnderReplicatedContainers", underReplicated);
+        countsMap.put("numOfUnclosedContainers", unclosed);
+        return countsMap;
+      }
+    }
+    return null;
+  }
+}
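
For context on how these helpers compose, here is a minimal, self-contained sketch (not part of the commit). It feeds a sample NodeDecommissionMetrics payload, shaped like the test fixtures in TestEndpoints further below, through getBeansJsonNode, getNumDecomNodes and getCountsMap; the DatanodeDetails builder calls are an assumption about the current HDDS API.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

import com.fasterxml.jackson.databind.JsonNode;
import org.apache.hadoop.hdds.client.DecommissionUtils;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;

public final class DecommissionUtilsSketch {
  public static void main(String[] args) throws Exception {
    // Sample JMX payload; keys mirror the NodeDecommissionMetrics bean.
    String metricsJson = "{ \"beans\" : [ { "
        + "\"DecommissioningMaintenanceNodesTotal\" : 1, "
        + "\"tag.datanode.1\" : \"host0\", "
        + "\"PipelinesWaitingToCloseDN.1\" : 1, "
        + "\"UnderReplicatedDN.1\" : 3, "
        + "\"UnclosedContainersDN.1\" : 3, "
        + "\"StartTimeDN.1\" : 221221 } ]}";

    // beans[0] of the payload, then the decommissioning-node count.
    JsonNode bean = DecommissionUtils.getBeansJsonNode(metricsJson);
    int numDecomNodes = DecommissionUtils.getNumDecomNodes(bean); // 1

    // Hostname must match tag.datanode.<i>; builder usage is assumed.
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID())
        .setHostName("host0")
        .setIpAddress("1.2.3.4")
        .build();

    // Fills decommissionStartTime plus the three counts above, or
    // returns null when the datanode has no entry in the bean.
    Map<String, Object> counts = DecommissionUtils.getCountsMap(
        dn, bean, numDecomNodes, new LinkedHashMap<>(), "no metrics for host0");
    System.out.println(counts);
  }
}
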
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index b146d68a58..18ddbd086d 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -17,12 +17,10 @@
  */
 package org.apache.hadoop.hdds.scm.cli.datanode;
 
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.DecommissionUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
@@ -32,11 +30,8 @@ import org.apache.hadoop.hdds.server.JsonUtils;
 import picocli.CommandLine;
 
 import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
 import java.util.LinkedHashMap;
 import java.util.ArrayList;
-import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -72,29 +67,25 @@ public class DecommissionStatusSubCommand extends ScmSubcommand {
 
   @Override
   public void execute(ScmClient scmClient) throws IOException {
-    List<HddsProtos.Node> decommissioningNodes;
     Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
         null, HddsProtos.QueryScope.CLUSTER, "").stream();
+    List<HddsProtos.Node> decommissioningNodes =
+        DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
     if (!Strings.isNullOrEmpty(uuid)) {
-      decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid()
-          .equals(uuid)).collect(Collectors.toList());
       if (decommissioningNodes.isEmpty()) {
         System.err.println("Datanode: " + uuid + " is not in DECOMMISSIONING");
         return;
       }
     } else if (!Strings.isNullOrEmpty(ipAddress)) {
-      decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress()
-          .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList());
       if (decommissioningNodes.isEmpty()) {
         System.err.println("Datanode: " + ipAddress + " is not in " +
             "DECOMMISSIONING");
         return;
       }
     } else {
-      decommissioningNodes = allNodes.collect(Collectors.toList());
       if (!json) {
         System.out.println("\nDecommission Status: DECOMMISSIONING - " +
-                decommissioningNodes.size() + " node(s)");
+            decommissioningNodes.size() + " node(s)");
       }
     }
 
@@ -102,12 +93,8 @@ public class DecommissionStatusSubCommand extends ScmSubcommand {
     int numDecomNodes = -1;
     JsonNode jsonNode = null;
     if (metricsJson != null) {
-      ObjectMapper objectMapper = new ObjectMapper();
-      JsonFactory factory = objectMapper.getFactory();
-      JsonParser parser = factory.createParser(metricsJson);
-      jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0);
-      JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal");
-      numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString()));
+      jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+      numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
     }
 
     if (json) {
@@ -164,28 +151,9 @@ public class DecommissionStatusSubCommand extends ScmSubcommand {
     Map<String, Object> countsMap = new LinkedHashMap<>();
     String errMsg = getErrorMessage() + datanode.getHostName();
     try {
-      for (int i = 1; i <= numDecomNodes; i++) {
-        if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) {
-          JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i);
-          JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i);
-          JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i);
-          JsonNode startTimeDN = counts.get("StartTimeDN." + i);
-          if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) {
-            throw new IOException(errMsg);
-          }
-
-          int pipelines = Integer.parseInt(pipelinesDN.toString());
-          double underReplicated = Double.parseDouble(underReplicatedDN.toString());
-          double unclosed = Double.parseDouble(unclosedDN.toString());
-          long startTime = Long.parseLong(startTimeDN.toString());
-          Date date = new Date(startTime);
-          DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
-          countsMap.put("decommissionStartTime", formatter.format(date));
-          countsMap.put("numOfUnclosedPipelines", pipelines);
-          countsMap.put("numOfUnderReplicatedContainers", underReplicated);
-          countsMap.put("numOfUnclosedContainers", unclosed);
-          return countsMap;
-        }
+      countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
+      if (countsMap != null) {
+        return countsMap;
       }
       System.err.println(errMsg);
     } catch (IOException e) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
index ab87bda441..5c9e403963 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
@@ -185,6 +185,7 @@ public final class  ReconServerConfigKeys {
 
   public static final int
       OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3;
+
   /**
    * Private constructor for utility class.
    */
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index d384c761dd..a0bcfd3025 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.ozone.recon.api;
 
+import com.fasterxml.jackson.databind.JsonNode;
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.DecommissionUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
@@ -32,8 +35,10 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata;
 import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline;
 import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
@@ -48,6 +53,7 @@ import javax.ws.rs.GET;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
@@ -55,16 +61,21 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+
 /**
  * Endpoint to fetch details about datanodes.
  */
@@ -78,14 +89,18 @@ public class NodeEndpoint {
   private ReconNodeManager nodeManager;
   private ReconPipelineManager pipelineManager;
   private ReconContainerManager reconContainerManager;
+  private StorageContainerLocationProtocol scmClient;
+  private String errorMessage = "Error getting pipeline and container metrics for ";
 
   @Inject
-  NodeEndpoint(OzoneStorageContainerManager reconSCM) {
+  NodeEndpoint(OzoneStorageContainerManager reconSCM,
+               StorageContainerLocationProtocol scmClient) {
     this.nodeManager =
         (ReconNodeManager) reconSCM.getScmNodeManager();
-    this.reconContainerManager = 
+    this.reconContainerManager =
         (ReconContainerManager) reconSCM.getContainerManager();
     this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager();
+    this.scmClient = scmClient;
   }
 
   /**
@@ -325,4 +340,112 @@ public class NodeEndpoint {
           }
         });
   }
+
+  /**
+   * This GET API provides the information of all datanodes for which decommissioning is initiated.
+   * @return the wrapped Response output
+   */
+  @GET
+  @Path("/decommission/info")
+  public Response getDatanodesDecommissionInfo() {
+    try {
+      return getDecommissionStatusResponse(null, null);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * This GET API provides the information of a specific datanode for which decommissioning is initiated.
+   * The API accepts either uuid or ipAddress; uuid takes precedence if both are provided.
+   * @return the wrapped Response output
+   */
+  @GET
+  @Path("/decommission/info/datanode")
+  public Response getDecommissionInfoForDatanode(@QueryParam("uuid") String uuid,
+                                                 @QueryParam("ipAddress") String ipAddress) {
+    if (StringUtils.isEmpty(uuid)) {
+      Preconditions.checkNotNull(ipAddress, "Either uuid or ipAddress of a datanode should be provided !!!");
+      Preconditions.checkArgument(!ipAddress.isEmpty(),
+          "Either uuid or ipAddress of a datanode should be provided !!!");
+    }
+    try {
+      return getDecommissionStatusResponse(uuid, ipAddress);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private Response getDecommissionStatusResponse(String uuid, String ipAddress) throws IOException {
+    Response.ResponseBuilder builder = Response.status(Response.Status.OK);
+    Map<String, Object> responseMap = new HashMap<>();
+    Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
+        null, HddsProtos.QueryScope.CLUSTER, "", ClientVersion.CURRENT_VERSION).stream();
+    List<HddsProtos.Node> decommissioningNodes =
+        DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
+    String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+    int numDecomNodes = -1;
+    JsonNode jsonNode = null;
+    if (metricsJson != null) {
+      jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+      numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
+    }
+    List<Map<String, Object>> dnDecommissionInfo =
+        getDecommissioningNodesDetails(decommissioningNodes, jsonNode, numDecomNodes);
+    try {
+      responseMap.put("DatanodesDecommissionInfo", dnDecommissionInfo);
+      builder.entity(responseMap);
+      return builder.build();
+    } catch (Exception exception) {
+      LOG.error("Unexpected Error: ", exception);
+      throw new WebApplicationException(exception, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  private List<Map<String, Object>> getDecommissioningNodesDetails(List<HddsProtos.Node> decommissioningNodes,
+                                                                   JsonNode jsonNode,
+                                                                   int numDecomNodes) throws IOException {
+    List<Map<String, Object>> decommissioningNodesDetails = new ArrayList<>();
+
+    for (HddsProtos.Node node : decommissioningNodes) {
+      DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
+          node.getNodeID());
+      Map<String, Object> datanodeMap = new LinkedHashMap<>();
+      datanodeMap.put("datanodeDetails", datanode);
+      datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes));
+      datanodeMap.put("containers", getContainers(datanode));
+      decommissioningNodesDetails.add(datanodeMap);
+    }
+    return decommissioningNodesDetails;
+  }
+
+  private Map<String, Object> getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) {
+    Map<String, Object> countsMap = new LinkedHashMap<>();
+    String errMsg = getErrorMessage() + datanode.getHostName();
+    try {
+      countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
+      if (countsMap != null) {
+        return countsMap;
+      }
+      LOG.error(errMsg);
+    } catch (IOException e) {
+      LOG.error(errMsg, e);
+    }
+    return countsMap;
+  }
+
+  private Map<String, Object> getContainers(DatanodeDetails datanode)
+      throws IOException {
+    Map<String, List<ContainerID>> containers = scmClient.getContainersOnDecomNode(datanode);
+    return containers.entrySet().stream()
+        .collect(Collectors.toMap(
+            Map.Entry::getKey,
+            entry -> entry.getValue().stream().map(ContainerID::toString).collect(Collectors.toList())));
+  }
+
+  public String getErrorMessage() {
+    return errorMessage;
+  }
 }
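
As a usage sketch (not part of the commit), the two new endpoints can be exercised with a plain HTTP client. The /api/v1/datanodes prefix and the default Recon port 9888 are assumptions based on where NodeEndpoint is mounted; adjust to your deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class DecommissionInfoClientSketch {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    String base = "http://localhost:9888/api/v1/datanodes"; // assumed Recon address

    // All datanodes currently decommissioning.
    HttpRequest all = HttpRequest.newBuilder(
        URI.create(base + "/decommission/info")).GET().build();

    // One datanode; uuid takes precedence when both parameters are set.
    HttpRequest one = HttpRequest.newBuilder(
        URI.create(base + "/decommission/info/datanode?uuid="
            + "654c4b89-04ef-4015-8a3b-50d0fb0e1684")).GET().build();

    System.out.println(client.send(all, HttpResponse.BodyHandlers.ofString()).body());
    System.out.println(client.send(one, HttpResponse.BodyHandlers.ofString()).body());
  }
}
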
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java
new file mode 100644
index 0000000000..e2312e2fdb
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Class that represents the datanode metrics captured during decommissioning.
+ */
+public class DatanodeMetrics {
+  /**
+   * Start time of decommission of datanode.
+   */
+  @JsonProperty("decommissionStartTime")
+  private String decommissionStartTime;
+
+  /**
+   * Number of pipelines in unclosed status.
+   */
+  @JsonProperty("numOfUnclosedPipelines")
+  private int numOfUnclosedPipelines;
+
+  /**
+   * Number of under replicated containers.
+   */
+  @JsonProperty("numOfUnderReplicatedContainers")
+  private double numOfUnderReplicatedContainers;
+
+  /**
+   * Number of containers still not closed.
+   */
+  @JsonProperty("numOfUnclosedContainers")
+  private double numOfUnclosedContainers;
+
+  public String getDecommissionStartTime() {
+    return decommissionStartTime;
+  }
+
+  public void setDecommissionStartTime(String decommissionStartTime) {
+    this.decommissionStartTime = decommissionStartTime;
+  }
+
+  public int getNumOfUnclosedPipelines() {
+    return numOfUnclosedPipelines;
+  }
+
+  public void setNumOfUnclosedPipelines(int numOfUnclosedPipelines) {
+    this.numOfUnclosedPipelines = numOfUnclosedPipelines;
+  }
+
+  public double getNumOfUnderReplicatedContainers() {
+    return numOfUnderReplicatedContainers;
+  }
+
+  public void setNumOfUnderReplicatedContainers(double numOfUnderReplicatedContainers) {
+    this.numOfUnderReplicatedContainers = numOfUnderReplicatedContainers;
+  }
+
+  public double getNumOfUnclosedContainers() {
+    return numOfUnclosedContainers;
+  }
+
+  public void setNumOfUnclosedContainers(double numOfUnclosedContainers) {
+    this.numOfUnclosedContainers = numOfUnclosedContainers;
+  }
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java
new file mode 100644
index 0000000000..aab2a2789b
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class that represents the API response of decommissioning status info of a datanode.
+ */
+public class DecommissionStatusInfoResponse {
+  /**
+   * Metadata of a datanode when decommissioning of datanode is in progress.
+   */
+  @JsonProperty("datanodeDetails")
+  private DatanodeDetails dataNodeDetails;
+
+  /**
+   * Metrics of datanode when decommissioning of datanode is in progress.
+   */
+  @JsonProperty("metrics")
+  private DatanodeMetrics datanodeMetrics;
+
+  /**
+   * Containers info of a datanode when decommissioning is in progress.
+   */
+  @JsonProperty("containers")
+  private Map<String, List<ContainerID>> containers;
+
+  public DatanodeDetails getDataNodeDetails() {
+    return dataNodeDetails;
+  }
+
+  public void setDataNodeDetails(DatanodeDetails dataNodeDetails) {
+    this.dataNodeDetails = dataNodeDetails;
+  }
+
+  public DatanodeMetrics getDatanodeMetrics() {
+    return datanodeMetrics;
+  }
+
+  public void setDatanodeMetrics(DatanodeMetrics datanodeMetrics) {
+    this.datanodeMetrics = datanodeMetrics;
+  }
+
+  public Map<String, List<ContainerID>> getContainers() {
+    return containers;
+  }
+
+  public void setContainers(
+      Map<String, List<ContainerID>> containers) {
+    this.containers = containers;
+  }
+}
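
This POJO models one entry of the DatanodesDecommissionInfo list returned by the endpoints above (the endpoint itself assembles LinkedHashMaps). A quick Jackson round-trip, as a sketch, shows the JSON shape the field annotations produce:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.ozone.recon.api.types.DatanodeMetrics;
import org.apache.hadoop.ozone.recon.api.types.DecommissionStatusInfoResponse;

public final class DecommissionResponseShapeSketch {
  public static void main(String[] args) throws Exception {
    DecommissionStatusInfoResponse entry = new DecommissionStatusInfoResponse();
    entry.setDatanodeMetrics(new DatanodeMetrics()); // datanodeDetails/containers left null

    // Approximate output: {"datanodeDetails":null,"metrics":{"decommissionStartTime":null,
    //   "numOfUnclosedPipelines":0,"numOfUnderReplicatedContainers":0.0,
    //   "numOfUnclosedContainers":0.0},"containers":null}
    System.out.println(new ObjectMapper().writeValueAsString(entry));
  }
}
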
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index c6cce75324..2c3439cd19 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -102,6 +102,7 @@ import org.jooq.DSLContext;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
@@ -144,6 +145,7 @@ import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -194,11 +196,15 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
   private static final String PROMETHEUS_TEST_RESPONSE_FILE =
       "prometheus-test-response.txt";
   private ReconUtils reconUtilsMock;
+  private StorageContainerLocationProtocol mockScmClient;
 
   private ContainerHealthSchemaManager containerHealthSchemaManager;
   private CommonUtils commonUtils;
   private PipelineManager pipelineManager;
   private ReconPipelineManager reconPipelineManager;
+  private List<HddsProtos.Node> nodes = getNodeDetails(2);
+  private Map<String, List<ContainerID>> containerOnDecom = getContainersOnDecomNodes();
+  private ArrayList<String> metrics = getMetrics();
 
   public TestEndpoints() {
     super();
@@ -236,8 +242,8 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
     ContainerWithPipeline containerWithPipeline =
         new ContainerWithPipeline(containerInfo, pipeline);
 
-    StorageContainerLocationProtocol mockScmClient = mock(
-        StorageContainerLocationProtocol.class);
+    mockScmClient = mock(
+        StorageContainerLocationProtocol.class, Mockito.RETURNS_DEEP_STUBS);
     StorageContainerServiceProvider mockScmServiceProvider = mock(
         StorageContainerServiceProviderImpl.class);
     when(mockScmServiceProvider.getPipeline(
@@ -1313,4 +1319,147 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
     DatanodeMetadata datanodeMetadata = datanodes.stream().findFirst().get();
     assertEquals(dnUUID, datanodeMetadata.getUuid());
   }
+
+  @Test
+  public void testSuccessWhenDecommissionStatus() throws IOException {
+    when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn(
+        nodes); // 2 nodes decommissioning
+    when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
+    when(mockScmClient.getMetrics(any())).thenReturn(metrics.get(1));
+    Response datanodesDecommissionInfo = nodeEndpoint.getDatanodesDecommissionInfo();
+    Map<String, Object> responseMap = (Map<String, Object>) datanodesDecommissionInfo.getEntity();
+    List<Map<String, Object>> dnDecommissionInfo =
+        (List<Map<String, Object>>) responseMap.get("DatanodesDecommissionInfo");
+    DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails");
+    Map<String, Object> dnMetrics = (Map<String, Object>) dnDecommissionInfo.get(0).get("metrics");
+    Map<String, Object> containers = (Map<String, Object>) dnDecommissionInfo.get(0).get("containers");
+    assertNotNull(datanode);
+    assertNotNull(dnMetrics);
+    assertNotNull(containers);
+    assertFalse(datanode.getUuidString().isEmpty());
+    assertFalse(((String) dnMetrics.get("decommissionStartTime")).isEmpty());
+    assertEquals(1, dnMetrics.get("numOfUnclosedPipelines"));
+    assertEquals(3.0, dnMetrics.get("numOfUnderReplicatedContainers"));
+    assertEquals(3.0, dnMetrics.get("numOfUnclosedContainers"));
+
+    assertEquals(3, ((List<String>) containers.get("UnderReplicated")).size());
+    assertEquals(3, ((List<String>) containers.get("UnClosed")).size());
+  }
+
+  @Test
+  public void testSuccessWhenDecommissionStatusWithUUID() throws IOException {
+    when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn(
+        getNodeDetailsForUuid("654c4b89-04ef-4015-8a3b-50d0fb0e1684")); // 1 node decommissioning
+    when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
+    Response datanodesDecommissionInfo =
+        nodeEndpoint.getDecommissionInfoForDatanode("654c4b89-04ef-4015-8a3b-50d0fb0e1684", "");
+    Map<String, Object> responseMap = (Map<String, Object>) datanodesDecommissionInfo.getEntity();
+    List<Map<String, Object>> dnDecommissionInfo =
+        (List<Map<String, Object>>) responseMap.get("DatanodesDecommissionInfo");
+    DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails");
+    Map<String, Object> containers = (Map<String, Object>) dnDecommissionInfo.get(0).get("containers");
+    assertNotNull(datanode);
+    assertNotNull(containers);
+    assertFalse(datanode.getUuidString().isEmpty());
+    assertEquals("654c4b89-04ef-4015-8a3b-50d0fb0e1684", 
datanode.getUuidString());
+
+    assertEquals(3, ((List<String>) containers.get("UnderReplicated")).size());
+    assertEquals(3, ((List<String>) containers.get("UnClosed")).size());
+  }
+
+  private List<HddsProtos.Node> getNodeDetailsForUuid(String uuid) {
+    List<HddsProtos.Node> nodesList = new ArrayList<>();
+
+    HddsProtos.DatanodeDetailsProto.Builder dnd =
+        HddsProtos.DatanodeDetailsProto.newBuilder();
+    dnd.setHostName("hostName");
+    dnd.setIpAddress("1.2.3.5");
+    dnd.setNetworkLocation("/default");
+    dnd.setNetworkName("hostName");
+    dnd.addPorts(HddsProtos.Port.newBuilder()
+        .setName("ratis").setValue(5678).build());
+    dnd.setUuid(uuid);
+
+    HddsProtos.Node.Builder builder = HddsProtos.Node.newBuilder();
+    builder.addNodeOperationalStates(
+        HddsProtos.NodeOperationalState.DECOMMISSIONING);
+    builder.addNodeStates(HddsProtos.NodeState.HEALTHY);
+    builder.setNodeID(dnd.build());
+    nodesList.add(builder.build());
+    return nodesList;
+  }
+
+  private List<HddsProtos.Node> getNodeDetails(int n) {
+    List<HddsProtos.Node> nodesList = new ArrayList<>();
+
+    for (int i = 0; i < n; i++) {
+      HddsProtos.DatanodeDetailsProto.Builder dnd =
+          HddsProtos.DatanodeDetailsProto.newBuilder();
+      dnd.setHostName("host" + i);
+      dnd.setIpAddress("1.2.3." + (i + 1));
+      dnd.setNetworkLocation("/default");
+      dnd.setNetworkName("host" + i);
+      dnd.addPorts(HddsProtos.Port.newBuilder()
+          .setName("ratis").setValue(5678).build());
+      dnd.setUuid(UUID.randomUUID().toString());
+
+      HddsProtos.Node.Builder builder = HddsProtos.Node.newBuilder();
+      builder.addNodeOperationalStates(
+          HddsProtos.NodeOperationalState.DECOMMISSIONING);
+      builder.addNodeStates(HddsProtos.NodeState.HEALTHY);
+      builder.setNodeID(dnd.build());
+      nodesList.add(builder.build());
+    }
+    return nodesList;
+  }
+
+  private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
+    Map<String, List<ContainerID>> containerMap = new HashMap<>();
+    List<ContainerID> underReplicated = new ArrayList<>();
+    underReplicated.add(new ContainerID(1L));
+    underReplicated.add(new ContainerID(2L));
+    underReplicated.add(new ContainerID(3L));
+    containerMap.put("UnderReplicated", underReplicated);
+    List<ContainerID> unclosed = new ArrayList<>();
+    unclosed.add(new ContainerID(10L));
+    unclosed.add(new ContainerID(11L));
+    unclosed.add(new ContainerID(12L));
+    containerMap.put("UnClosed", unclosed);
+    return containerMap;
+  }
+
+  private ArrayList<String> getMetrics() {
+    ArrayList<String> result = new ArrayList<>();
+    // no nodes decommissioning
+    result.add("{  \"beans\" : [ {    " +
+        "\"name\" : 
\"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\",    " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\",    
\"DecommissioningMaintenanceNodesTotal\" : 0,    " +
+        "\"RecommissionNodesTotal\" : 0,    \"PipelinesWaitingToCloseTotal\" : 
0,    " +
+        "\"ContainersUnderReplicatedTotal\" : 0,    
\"ContainersUnClosedTotal\" : 0,    " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 0  } ]}");
+    // 2 nodes in decommissioning
+    result.add("{  \"beans\" : [ {    " +
+        "\"name\" : 
\"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\",    " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\",    
\"DecommissioningMaintenanceNodesTotal\" : 2,    " +
+        "\"RecommissionNodesTotal\" : 0,    \"PipelinesWaitingToCloseTotal\" : 
2,    " +
+        "\"ContainersUnderReplicatedTotal\" : 6,    
\"ContainersUnclosedTotal\" : 6,    " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 10,   " +
+        "\"tag.datanode.1\" : \"host0\",    \"tag.Hostname.1\" : \"host0\",    
" +
+        "\"PipelinesWaitingToCloseDN.1\" : 1,    \"UnderReplicatedDN.1\" : 3,  
  " +
+        "\"SufficientlyReplicatedDN.1\" : 0,    \"UnclosedContainersDN.1\" : 
3,    \"StartTimeDN.1\" : 111211,    " +
+        "\"tag.datanode.2\" : \"host1\",    \"tag.Hostname.2\" : \"host1\",    
" +
+        "\"PipelinesWaitingToCloseDN.2\" : 1,    \"UnderReplicatedDN.2\" : 3,  
  " +
+        "\"SufficientlyReplicatedDN.2\" : 0,    \"UnclosedContainersDN.2\" : 
3,    \"StartTimeDN.2\" : 221221} ]}");
+    // only host 1 decommissioning
+    result.add("{  \"beans\" : [ {    " +
+        "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\",    " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\",    \"DecommissioningMaintenanceNodesTotal\" : 1,    " +
+        "\"RecommissionNodesTotal\" : 0,    \"PipelinesWaitingToCloseTotal\" : 1,    " +
+        "\"ContainersUnderReplicatedTotal\" : 3,    \"ContainersUnclosedTotal\" : 3,    " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 10,   " +
+        "\"tag.datanode.1\" : \"host0\",\n    \"tag.Hostname.1\" : \"host0\",\n    " +
+        "\"PipelinesWaitingToCloseDN.1\" : 1,\n    \"UnderReplicatedDN.1\" : 3,\n    " +
+        "\"SufficientlyReplicatedDN.1\" : 0,\n    \"UnclosedContainersDN.1\" : 3,    \"StartTimeDN.1\" : 221221} ]}");
+    return result;
+  }
 }

