This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 784c89865b HDDS-9239. Ozone cli command to get container info should deal with empty values for --json (#5367)
784c89865b is described below

commit 784c89865be57313ca009b28fc76c580f8e6538b
Author: Tejaskriya <[email protected]>
AuthorDate: Thu Sep 28 14:56:18 2023 +0530

    HDDS-9239. Ozone cli command to get container info should deal with empty values for --json (#5367)
---
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |   6 +-
 .../hdds/scm/cli/container/InfoSubcommand.java     | 101 ++++++++++++++++++++-
 2 files changed, 102 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index de0c1e932c..42aff0678a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -33,6 +33,7 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
@@ -191,6 +192,7 @@ public final class Pipeline {
    *
    * @return Set of DatanodeDetails
    */
+  @JsonIgnore
   public Set<DatanodeDetails> getNodeSet() {
     return Collections.unmodifiableSet(nodeStatus.keySet());
   }
@@ -282,10 +284,12 @@ public final class Pipeline {
         "All nodes are excluded: Pipeline=%s, excluded=%s", id, excluded));
   }
 
+  @JsonIgnore
   public boolean isClosed() {
     return state == PipelineState.CLOSED;
   }
 
+  @JsonIgnore
   public boolean isOpen() {
     return state == PipelineState.OPEN;
   }
@@ -317,7 +321,7 @@ public final class Pipeline {
 
   public boolean isHealthy() {
     // EC pipelines are not reported by the DN and do not have a leader. If a
-    // node goes stale or dead, EC pipelines will by closed like RATIS pipelines
+    // node goes stale or dead, EC pipelines will be closed like RATIS pipelines
     // but at the current time there are not other health metrics for EC.
     if (replicationConfig.getReplicationType() == ReplicationType.EC) {
       return true;
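
The @JsonIgnore annotations added in this hunk matter because Jackson's default bean serialization picks up every public getter, including derived ones such as getNodeSet(), isClosed() and isOpen(), and turns each into an extra JSON property. A minimal sketch of that behaviour with plain Jackson (the PipelineLike class below is illustrative only, not part of the Ozone codebase):

    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonIgnoreSketch {
      // Illustrative bean, not the real Pipeline class.
      public static class PipelineLike {
        private final String state = "CLOSED";

        public String getState() {
          return state;
        }

        // Without @JsonIgnore, Jackson would also emit a derived
        // "closed": true property computed from this getter.
        @JsonIgnore
        public boolean isClosed() {
          return "CLOSED".equals(state);
        }
      }

      public static void main(String[] args) throws Exception {
        // Prints {"state":"CLOSED"}; drop the @JsonIgnore above and a
        // "closed" property appears in the output as well.
        System.out.println(new ObjectMapper().writeValueAsString(new PipelineLike()));
      }
    }

With the annotations in place, only the pipeline's stored state reaches the --json output, rather than additional computed properties derived from the node map.
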
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index 7f5f48fea1..f045ec63bc 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -20,11 +20,17 @@ package org.apache.hadoop.hdds.scm.cli.container;
 import java.io.IOException;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.time.Instant;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -34,6 +40,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.server.JsonUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -80,10 +87,17 @@ public class InfoSubcommand extends ScmSubcommand {
     }
 
     if (json) {
-      ContainerWithPipelineAndReplicas wrapper =
-          new ContainerWithPipelineAndReplicas(container.getContainerInfo(),
-              container.getPipeline(), replicas);
-      LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
+      if (container.getPipeline().size() != 0) {
+        ContainerWithPipelineAndReplicas wrapper =
+            new ContainerWithPipelineAndReplicas(container.getContainerInfo(),
+                container.getPipeline(), replicas);
+        LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
+      } else {
+        ContainerWithoutDatanodes wrapper =
+            new ContainerWithoutDatanodes(container.getContainerInfo(),
+                container.getPipeline(), replicas);
+        LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper));
+      }
     } else {
       // Print container report info.
       LOG.info("Container id: {}", containerID);
@@ -155,4 +169,83 @@ public class InfoSubcommand extends ScmSubcommand {
       return replicas;
     }
   }
+
+  private static class ContainerWithoutDatanodes {
+
+    private ContainerInfo containerInfo;
+    private PipelineWithoutDatanodes pipeline;
+    private List<ContainerReplicaInfo> replicas;
+
+    ContainerWithoutDatanodes(ContainerInfo container, Pipeline pipeline,
+                                     List<ContainerReplicaInfo> replicas) {
+      this.containerInfo = container;
+      this.pipeline = new PipelineWithoutDatanodes(pipeline);
+      this.replicas = replicas;
+    }
+
+    public ContainerInfo getContainerInfo() {
+      return containerInfo;
+    }
+
+    public PipelineWithoutDatanodes getPipeline() {
+      return pipeline;
+    }
+
+    public List<ContainerReplicaInfo> getReplicas() {
+      return replicas;
+    }
+  }
+
+  // All Pipeline information except the ones dependent on datanodes
+  private static final class PipelineWithoutDatanodes {
+    private final PipelineID id;
+    private final ReplicationConfig replicationConfig;
+    private final Pipeline.PipelineState state;
+    private Instant creationTimestamp;
+    private Map<DatanodeDetails, Long> nodeStatus;
+
+    private PipelineWithoutDatanodes(Pipeline pipeline) {
+      this.id = pipeline.getId();
+      this.replicationConfig = pipeline.getReplicationConfig();
+      this.state = pipeline.getPipelineState();
+      this.creationTimestamp = pipeline.getCreationTimestamp();
+      this.nodeStatus = new HashMap<>(); // All DNs down
+    }
+
+    public PipelineID getId() {
+      return id;
+    }
+
+    public ReplicationConfig getReplicationConfig() {
+      return replicationConfig;
+    }
+
+    public Pipeline.PipelineState getPipelineState() {
+      return state;
+    }
+
+    public Instant getCreationTimestamp() {
+      return creationTimestamp;
+    }
+
+    public HddsProtos.ReplicationType getType() {
+      return replicationConfig.getReplicationType();
+    }
+
+    public boolean isEmpty() {
+      return nodeStatus.isEmpty();
+    }
+
+    public List<DatanodeDetails> getNodes() {
+      return new ArrayList<>(nodeStatus.keySet());
+    }
+
+    public boolean isAllocationTimeout() {
+      return false;
+    }
+
+    public boolean isHealthy() {
+      return false; // leaderId is always null, so pipeline is unhealthy
+    }
+  }
 }
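
The new PipelineWithoutDatanodes class is essentially a hand-written serialization view: it copies only the pipeline fields that remain meaningful when no datanodes are associated with the container, and reports an empty node list instead of calling into datanode-backed state. A minimal sketch of the same view-class pattern with plain Jackson (PipelineView and its fields are illustrative, not the Ozone classes; the patch itself serializes through JsonUtils.toJsonStringWithDefaultPrettyPrinter):

    import java.util.Collections;
    import java.util.List;

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class PipelineViewSketch {
      // Illustrative "view" class mirroring the idea behind
      // PipelineWithoutDatanodes: expose only fields that do not
      // depend on live datanodes.
      public static class PipelineView {
        private final String id;
        private final String state;

        PipelineView(String id, String state) {
          this.id = id;
          this.state = state;
        }

        public String getId() {
          return id;
        }

        public String getState() {
          return state;
        }

        // Always an empty list here, rather than touching datanode state.
        public List<String> getNodes() {
          return Collections.emptyList();
        }
      }

      public static void main(String[] args) throws Exception {
        // Produces well-formed, pretty-printed JSON with "nodes" : [ ]
        // even though no datanodes exist for the pipeline.
        System.out.println(new ObjectMapper().writerWithDefaultPrettyPrinter()
            .writeValueAsString(new PipelineView("pipeline-1", "CLOSED")));
      }
    }

The subcommand then only has to pick the right wrapper: the existing ContainerWithPipelineAndReplicas when container.getPipeline().size() != 0, and ContainerWithoutDatanodes otherwise, so --json always emits well-formed output.
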

