adoroszlai commented on code in PR #7266:
URL: https://github.com/apache/ozone/pull/7266#discussion_r2111331667
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java:
##########
@@ -489,4 +490,22 @@ DecommissionScmResponseProto decommissionScm(
String scmId) throws IOException;
String getMetrics(String query) throws IOException;
+
+ /**
+ * Get getVolumeInfos based on query conditions.
+ *
+ * @param displayMode Represents the mode for displaying volumes.
+ * Options include "all" for all volumes, "failed" for failed volumes,
+ * and "normal" for normal volumes.
+ * @param uuid datanode uuid String.
+ * @param hostName datanode hostName String.
+ * @param pageSize Records displayed per page.
+ * @param startItem The starting point for pagination.
+ * @return Volume Information List.
+ * @throws IOException
+ * I/O exceptions that may occur during the process of querying the volume.
+ */
+ StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto getVolumeInfos(
Review Comment:
nit: please import `GetVolumeInfosResponseProto` instead of
`StorageContainerLocationProtocolProtos`.
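   For example, a sketch of the suggested change (parameter list elided as in the hunk above):
   ```java
   import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;

   // ...

   GetVolumeInfosResponseProto getVolumeInfos(
   ```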
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/datanode/VolumeInfo.java:
##########
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.datanode;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+
+/**
+ * This class is used to record disk failure conditions.
+ * The failureTime may be 0, and capacityLost may be 0, because if the DN restarts,
+ * we will not know the original capacity of the failed disk.
+ */
+public final class VolumeInfo implements Comparable<VolumeInfo> {
+
+ private String uuid;
+ private String hostName;
+ private String volumeName;
+ private boolean failed;
+ private long failureTime;
+ private long capacity;
+
+ private static final Codec<VolumeInfo> CODEC = new DelegatedCodec<>(
+ Proto2Codec.get(HddsProtos.VolumeInfoProto.getDefaultInstance()),
+ VolumeInfo::fromProtobuf,
+ VolumeInfo::getProtobuf,
+ VolumeInfo.class);
+
+ public static Codec<VolumeInfo> getCodec() {
+ return CODEC;
+ }
+
+ private VolumeInfo(VolumeInfo.Builder b) {
+ this.uuid = b.uuid;
+ this.volumeName = b.volumeName;
+ this.failureTime = b.failureTime;
+ this.hostName = b.hostName;
+ this.failed = b.failed;
+ this.capacity = b.capacity;
+ }
+
+ public static VolumeInfo fromProtobuf(HddsProtos.VolumeInfoProto info) {
+ VolumeInfo.Builder builder = new VolumeInfo.Builder();
+ builder.setUuid(info.getUuid())
+ .setHostName(info.getHostName())
+ .setFailed(info.getFailed())
+ .setVolumeName(info.getVolumeName())
+ .setFailureTime(info.getFailureTime())
+ .setCapacity(info.getCapacity());
+ return builder.build();
+ }
+
+ @JsonIgnore
+ public HddsProtos.VolumeInfoProto getProtobuf() {
+ HddsProtos.VolumeInfoProto.Builder builder =
+ HddsProtos.VolumeInfoProto.newBuilder();
+ builder.setUuid(getUuid())
+ .setHostName(getHostName())
+ .setFailed(isFailed())
+ .setVolumeName(getVolumeName())
+ .setFailureTime(getFailureTime())
+ .setCapacity(getCapacity());
+ return builder.build();
+ }
+
+ /**
+ * Builder class for creating an instance of a complex object.
+ * <p>
+ * This Builder provides a fluent interface for gradually setting
+ * the object's properties. Finally, the build() method is used
+ * to create the object.
+ * </p>
+ */
+ public static class Builder {
+ private String uuid;
+ private String hostName;
+ private boolean failed;
+ private String volumeName;
+ private long failureTime;
+ private long capacity;
+
+ public VolumeInfo.Builder setUuid(String pUuid) {
+ this.uuid = pUuid;
+ return this;
+ }
+
+ public VolumeInfo.Builder setHostName(String pHostName) {
+ this.hostName = pHostName;
+ return this;
+ }
+
+ public VolumeInfo.Builder setFailed(boolean pFailed) {
+ this.failed = pFailed;
+ return this;
+ }
+
+ public VolumeInfo.Builder setVolumeName(String pVolumeName) {
+ this.volumeName = pVolumeName;
+ return this;
+ }
+
+ public VolumeInfo.Builder setFailureTime(long pFailureTime) {
+ this.failureTime = pFailureTime;
+ return this;
+ }
+
+ public VolumeInfo.Builder setCapacity(long pCapacity) {
+ this.capacity = pCapacity;
+ return this;
+ }
+
+ public VolumeInfo build() {
+ return new VolumeInfo(this);
+ }
+ }
+
+ public String getUuid() {
+ return uuid;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+
+ public long getFailureTime() {
+ return failureTime;
+ }
+
+ public long getCapacity() {
+ return capacity;
+ }
+
+ public String getHostName() {
+ return hostName;
+ }
+
+ public boolean isFailed() {
+ return failed;
+ }
+
+ @Override
+ public int compareTo(VolumeInfo that) {
+ Preconditions.checkNotNull(that);
Review Comment:
   nit: prefer the built-in `Objects.requireNonNull` over Guava's `Preconditions.checkNotNull`.
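   For example, roughly:
   ```java
   import java.util.Objects;

   @Override
   public int compareTo(VolumeInfo that) {
     Objects.requireNonNull(that);
     // ... comparison logic unchanged from the current patch
   }
   ```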
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/datanode/VolumeInfo.java:
##########
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.datanode;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+
+/**
+ * This class is used to record disk failure conditions.
+ * The failureTime may be 0, and capacityLost may be 0, because if the DN restarts,
+ * we will not know the original capacity of the failed disk.
+ */
+public final class VolumeInfo implements Comparable<VolumeInfo> {
+
+ private String uuid;
+ private String hostName;
+ private String volumeName;
+ private boolean failed;
+ private long failureTime;
+ private long capacity;
+
+ private static final Codec<VolumeInfo> CODEC = new DelegatedCodec<>(
+ Proto2Codec.get(HddsProtos.VolumeInfoProto.getDefaultInstance()),
+ VolumeInfo::fromProtobuf,
+ VolumeInfo::getProtobuf,
+ VolumeInfo.class);
+
+ public static Codec<VolumeInfo> getCodec() {
+ return CODEC;
+ }
Review Comment:
Codec is required only for storing in DB, but `VolumeInfo` does not seem to
be persisted by either datanode or SCM. So I think this can be removed.
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/datanode/VolumeInfo.java:
##########
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.datanode;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+
+/**
+ * This class is used to record disk failure conditions.
+ * The failureTime may be 0, and capacityLost may be 0, because if the DN restarts,
+ * we will not know the original capacity of the failed disk.
+ */
+public final class VolumeInfo implements Comparable<VolumeInfo> {
Review Comment:
nit: please add this class in existing package
`org.apache.hadoop.hdds.protocol` (which contains other datanode-related
classes like `DatanodeID` and `DatanodeDetails`)
##########
hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/ozone/scm/TestVolumeCommand.java:
##########
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm;
Review Comment:
Should be in `org.apache.hadoop.hdds.scm.cli.datanode` to match the class
being tested?
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java:
##########
@@ -382,6 +384,8 @@ public void removeVolume(String volumeRoot) throws IOException {
LOG.info("Removed Volume : {} from VolumeSet", volumeRoot);
} else if (failedVolumeMap.containsKey(volumeRoot)) {
+ StorageVolume storageVolume = failedVolumeMap.get(volumeRoot);
+ storageVolume.resetFailureTime();
failedVolumeMap.remove(volumeRoot);
Review Comment:
   nit: can use a single `remove` instead of `get`+`remove`; `Map#remove` returns the removed value.
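   i.e. something like:
   ```java
   } else if (failedVolumeMap.containsKey(volumeRoot)) {
     // Map#remove returns the removed value, so the separate get is unnecessary
     StorageVolume storageVolume = failedVolumeMap.remove(volumeRoot);
     storageVolume.resetFailureTime();
   ```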
##########
hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/VolumeSubCommand.java:
##########
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.datanode;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import org.apache.hadoop.ozone.shell.ListPaginationOptions;
+import org.apache.hadoop.ozone.utils.FormattingCLIUtils;
+import org.apache.hadoop.util.StringUtils;
+import picocli.CommandLine;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * We provide a set of volume commands to display the disk information of the DataNode.
+ */
+@Command(
+ name = "volumes",
+ description = "Display the list of volumes on the DataNode.",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class VolumeSubCommand extends ScmSubcommand {
+
+ private static final String DATANODE_VOLUME_FAILURES_TITLE = "Datanode Volume";
+ private static final List<String> DATANODE_VOLUME_FAILURES_HEADER = Arrays.asList(
+ "UUID", "Host Name", "Volume Name", "Volume Status", "Capacity / Capacity Lost", "Failure Time");
+
+ private SimpleDateFormat sdf = new SimpleDateFormat(
+ "EEE MMM dd HH:mm:ss Z yyyy", Locale.ENGLISH);
+
+ /**
+ * We have designed a new option called 'show',
+ * which includes two selectable configurations:
+ * 'failed' is used to display failed disks,
+ * and 'normal' is used to display normal disks.
+ */
+ @Option(names = { "--displayMode" },
+ defaultValue = "all",
+ description = "Display mode for disks: 'failed' shows failed disks, " +
+ "'normal' shows healthy disks, 'all' shows all disks.")
+ private DISPLAYMODE displayMode;
Review Comment:
On another look, I think "display mode" is confusing. JSON and Table are
display modes. "all/normal/failed" filter the list by volume state.
I suggest renaming to `--state`, and renaming `normal` to `healthy`.
   Then the description can be simplified to `Filter disks by state`.
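   Rough sketch (`VolumeState` and its values are just suggested names):
   ```java
   @Option(names = { "--state" },
       defaultValue = "ALL",
       description = "Filter disks by state.")
   private VolumeState state;

   enum VolumeState { ALL, HEALTHY, FAILED }
   ```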
##########
hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/ozone/scm/TestVolumeCommand.java:
##########
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.datanode.VolumeSubCommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.util.Time;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import picocli.CommandLine;
+
+/**
+ * This unit test is used to verify whether the output of
+ * `TestVolumeFailureSubCommand` meets the expected results.
Review Comment:
`TestVolumeFailureSubCommand` does not exist (should be `VolumeSubCommand`?)
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java:
##########
@@ -155,16 +158,63 @@ public void updateStorageReports(List<StorageReportProto> reports) {
.filter(e -> e.hasFailed() && e.getFailed())
.count();
+ // We choose to update the status of failed disks during the heartbeat,
+ // so we can directly retrieve it when querying.
+ List<VolumeInfoProto> volumeInfoLists = new ArrayList<>();
+ for (StorageReportProto report : reports) {
+
+ String storageLocation = report.getStorageLocation();
+ long capacity = report.getCapacity();
+ String uuid = getUuidString();
+ String hostName = getHostName();
+
+ long failureTime = -1;
+ if (report.hasFailureTime()) {
+ failureTime = report.getFailureTime();
+ }
+
+ boolean failed = false;
+ if (report.hasFailed() && report.getFailed()) {
+ failed = true;
+ }
+
+ VolumeInfoProto volumeFailure =
+ VolumeInfoProto.newBuilder().
+ setUuid(uuid).
+ setHostName(hostName).
+ setVolumeName(storageLocation).
+ setCapacity(capacity).
+ setFailed(failed).
+ setFailureTime(failureTime).
+ build();
+ volumeInfoLists.add(volumeFailure);
+ }
+
Review Comment:
   Please wrap `volumeInfoLists` in an `unmodifiableList` after all items are added.
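   e.g. a sketch (how the wrapped list is then stored is up to the surrounding code):
   ```java
   import java.util.Collections;

   // publish an immutable view once all reports have been converted
   List<VolumeInfoProto> volumeInfos = Collections.unmodifiableList(volumeInfoLists);
   ```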
##########
hadoop-hdds/interface-client/src/main/proto/hdds.proto:
##########
@@ -304,6 +304,15 @@ message RemoveScmResponseProto {
optional string scmId = 2;
}
+message VolumeInfoProto {
+ optional string uuid = 1;
Review Comment:
Please use `DatanodeIDProto`.
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java:
##########
@@ -489,4 +490,22 @@ DecommissionScmResponseProto decommissionScm(
String scmId) throws IOException;
String getMetrics(String query) throws IOException;
+
+ /**
+ * Get getVolumeInfos based on query conditions.
Review Comment:
nit: `Get getVolumeInfos` does not sound right
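   Maybe something like: `Get volume information based on query conditions.`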
##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/datanode/VolumeInfo.java:
##########
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.datanode;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.CompareToBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+
+/**
+ * This class is used to record disk failure conditions.
+ * The failureTime may be 0, and capacityLost may be 0, because if the DN restarts,
+ * we will not know the original capacity of the failed disk.
+ */
+public final class VolumeInfo implements Comparable<VolumeInfo> {
+
+ private String uuid;
Review Comment:
Please use `DatanodeID`.
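   i.e. something like (the field name is just a placeholder):
   ```java
   import org.apache.hadoop.hdds.protocol.DatanodeID;

   private DatanodeID datanodeID;
   ```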
##########
hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/VolumeSubCommand.java:
##########
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.datanode;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import org.apache.hadoop.ozone.shell.ListPaginationOptions;
+import org.apache.hadoop.ozone.utils.FormattingCLIUtils;
+import org.apache.hadoop.util.StringUtils;
+import picocli.CommandLine;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * We provide a set of volume commands to display the disk information of the DataNode.
+ */
+@Command(
+ name = "volumes",
+ description = "Display the list of volumes on the DataNode.",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class VolumeSubCommand extends ScmSubcommand {
+
+ private static final String DATANODE_VOLUME_FAILURES_TITLE = "Datanode Volume";
+ private static final List<String> DATANODE_VOLUME_FAILURES_HEADER = Arrays.asList(
+ "UUID", "Host Name", "Volume Name", "Volume Status", "Capacity / Capacity Lost", "Failure Time");
+
+ private SimpleDateFormat sdf = new SimpleDateFormat(
+ "EEE MMM dd HH:mm:ss Z yyyy", Locale.ENGLISH);
+
+ /**
+ * We have designed a new option called 'show',
+ * which includes two selectable configurations:
+ * 'failed' is used to display failed disks,
+ * and 'normal' is used to display normal disks.
+ */
+ @Option(names = { "--displayMode" },
+ defaultValue = "all",
+ description = "Display mode for disks: 'failed' shows failed disks, " +
+ "'normal' shows healthy disks, 'all' shows all disks.")
+ private DISPLAYMODE displayMode;
+
+ // The UUID identifier of the DataNode.
+ @Option(names = { "--uuid" },
+ defaultValue = "",
+ description = "Filter disks by the UUID of the DataNode.")
+ private String uuid;
+
+ // The HostName identifier of the DataNode.
+ @Option(names = { "--hostName" },
+ defaultValue = "",
+ description = "Filter disks by the host name of the DataNode.")
+ private String hostName;
+
+ // Display it in JSON format.
+ @Option(names = { "--json" },
+ defaultValue = "false",
+ description = "Format output as JSON.")
+ private boolean json;
+
+ // Display it in TABLE format.
+ @Option(names = { "--table" },
+ defaultValue = "false",
+ description = "Format output as Table.")
+ private boolean table;
+
+ // Mixin command-line pagination options to support paginated query functionality
+ @CommandLine.Mixin
+ private ListPaginationOptions listOptions;
+
+ enum DISPLAYMODE { all, normal, failed }
Review Comment:
nit: Enums should be named like other types (classes, interfaces):
`DisplayMode`.
Also, please consider using all-caps for values (`ALL`, etc.)
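   Combined, that would be:
   ```java
   enum DisplayMode { ALL, NORMAL, FAILED }
   ```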
##########
hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/ozone/scm/TestVolumeCommand.java:
##########
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.datanode.VolumeSubCommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.util.Time;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import picocli.CommandLine;
+
+/**
+ * This unit test is used to verify whether the output of
+ * `TestVolumeFailureSubCommand` meets the expected results.
+ */
+public class TestVolumeCommand {
Review Comment:
Should be `TestVolumeSubCommand`?
##########
hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/VolumeSubCommand.java:
##########
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.datanode;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import org.apache.hadoop.ozone.utils.FormattingCLIUtils;
+import org.apache.hadoop.util.StringUtils;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * We provide a set of volume commands to display the disk information of the DataNode.
+ */
+@Command(
+ name = "volumes",
+ description = "Display the list of volumes on the DataNode.",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class VolumeSubCommand extends ScmSubcommand {
+
+ // Display it in JSON format.
+ @Option(names = { "--json" },
+ defaultValue = "false",
+ description = "Format output as JSON.")
+ private boolean json;
+
+ // Display it in TABLE format.
+ @Option(names = { "--table" },
+ defaultValue = "false",
+ description = "Format output as Table.")
+ private boolean table;
+
+ // PageSize refers to the number of items displayed per page
+ // in a paginated view.
+ @Option(names = { "--pageSize" },
+ defaultValue = "20",
+ description = "The number of volume information items displayed per
page.")
+ private int pageSize;
+
+ // The current page.
+ @Option(names = { "--currentPage" },
+ defaultValue = "1",
+ description = "The current page.")
+ private int currentPage;
+
+ /**
+ * We have designed a new option called 'show',
+ * which includes two selectable configurations:
+ * 'failed' is used to display failed disks,
+ * and 'normal' is used to display normal disks.
+ */
+ @Option(names = { "--displayMode" },
+ defaultValue = "all",
+ description = "failed is used to display failed disks, " +
+ "normal is used to display normal disks.")
+ private String displayMode;
+
+ // The UUID identifier of the DataNode.
+ @Option(names = { "--uuid" },
+ defaultValue = "",
+ description = "failed is used to display failed disks, " +
+ "normal is used to display normal disks.")
+ private String uuid;
+
+ // The HostName identifier of the DataNode.
+ @Option(names = { "--hostName" },
Review Comment:
This still applies to the latest patch.
##########
hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/VolumeSubCommand.java:
##########
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.datanode;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.VolumeInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import org.apache.hadoop.ozone.utils.FormattingCLIUtils;
+import org.apache.hadoop.util.StringUtils;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * We provide a set of volume commands to display the disk information of the DataNode.
+ */
+@Command(
+ name = "volumes",
+ description = "Display the list of volumes on the DataNode.",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class VolumeSubCommand extends ScmSubcommand {
+
+ // Display it in JSON format.
+ @Option(names = { "--json" },
+ defaultValue = "false",
+ description = "Format output as JSON.")
+ private boolean json;
+
+ // Display it in TABLE format.
+ @Option(names = { "--table" },
+ defaultValue = "false",
+ description = "Format output as Table.")
+ private boolean table;
+
+ // PageSize refers to the number of items displayed per page
+ // in a paginated view.
+ @Option(names = { "--pageSize" },
+ defaultValue = "20",
+ description = "The number of volume information items displayed per
page.")
+ private int pageSize;
+
+ // The current page.
+ @Option(names = { "--currentPage" },
+ defaultValue = "1",
+ description = "The current page.")
+ private int currentPage;
+
+ /**
+ * We have designed a new option called 'show',
+ * which includes two selectable configurations:
+ * 'failed' is used to display failed disks,
+ * and 'normal' is used to display normal disks.
+ */
+ @Option(names = { "--displayMode" },
+ defaultValue = "all",
+ description = "failed is used to display failed disks, " +
+ "normal is used to display normal disks.")
+ private String displayMode;
+
+ // The UUID identifier of the DataNode.
+ @Option(names = { "--uuid" },
+ defaultValue = "",
+ description = "failed is used to display failed disks, " +
+ "normal is used to display normal disks.")
+ private String uuid;
+
+ // The HostName identifier of the DataNode.
+ @Option(names = { "--hostName" },
+ defaultValue = "",
+ description = "failed is used to display failed disks, " +
+ "normal is used to display normal disks.")
+ private String hostName;
+
+ private SimpleDateFormat sdf = new SimpleDateFormat(
+ "EEE MMM dd HH:mm:ss Z yyyy", Locale.ENGLISH);
+
+ private static final String DATANODE_VOLUME_FAILURES_TITLE = "Datanode Volume";
+
+ private static final List<String> DATANODE_VOLUME_FAILURES_HEADER = Arrays.asList(
+ "UUID", "Host Name", "Volume Name", "Volume Status", "Capacity / Capacity Lost",
+ "Failure Time");
+
+ private final String[] validModes = {"all", "normal", "failed"};
+
+ @Override
+ public void execute(ScmClient client) throws IOException {
+
+ validateDisplayMode(displayMode);
+
+ // Retrieve the volume data based on the conditions.
+ GetVolumeInfosResponseProto response =
+ client.getVolumeInfos(displayMode, uuid, hostName, pageSize, currentPage);
+
+ // Print the relevant information if the return value is empty.
+ if (response == null || CollectionUtils.isEmpty(response.getVolumeInfosList())) {
+ System.out.println("No volume data was retrieved.");
+ return;
+ }
+
+ List<VolumeInfoProto> volumeInfosList = response.getVolumeInfosList();
+ List<VolumeInfo> volumeInfos = convertToVolumeInfos(volumeInfosList);
+
+ // If displayed in JSON format.
+ if (json) {
+ System.out.print(JsonUtils.toJsonStringWithDefaultPrettyPrinter(volumeInfos));
Review Comment:
This is still applicable.
   Also, please use `println`, to avoid situations like HDDS-13100.
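   i.e.:
   ```java
   System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(volumeInfos));
   ```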
##########
hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/ozone/scm/TestVolumeCommand.java:
##########
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetVolumeInfosResponseProto;
+import org.apache.hadoop.hdds.scm.cli.datanode.VolumeSubCommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.datanode.VolumeInfo;
+import org.apache.hadoop.util.Time;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import picocli.CommandLine;
+
+/**
+ * This unit test is used to verify whether the output of
+ * `TestVolumeFailureSubCommand` meets the expected results.
+ */
+public class TestVolumeCommand {
+ private VolumeSubCommand cmd;
+
+ @BeforeEach
+ public void setup() throws UnsupportedEncodingException {
+ cmd = new VolumeSubCommand();
+ }
+
+ @AfterEach
+ public void tearDown() {
+ }
+
Review Comment:
nit: unnecessary
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]