This is an automated email from the ASF dual-hosted git repository.

erose pushed a commit to branch HDDS-14496-zdu
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-14496-zdu by this push:
     new 97e127333f4 HDDS-14735. ComponentVersions should not be compared using 
int values. (#9841)
97e127333f4 is described below

commit 97e127333f452433ab6741d38d661f4156385c5e
Author: Ethan Rose <[email protected]>
AuthorDate: Tue Mar 3 12:43:58 2026 -0500

    HDDS-14735. ComponentVersions should not be compared using int values. 
(#9841)
    
    Generated-By: Cursor
---
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |  4 +-
 .../client/io/BlockInputStreamFactoryImpl.java     |  2 +-
 .../org/apache/hadoop/hdds/ComponentVersion.java   | 11 ++++
 .../java/org/apache/hadoop/hdds/HDDSVersion.java   | 22 ++++---
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  7 +--
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  |  2 +-
 .../org/apache/hadoop/ozone/ClientVersion.java     | 26 ++++----
 .../apache/hadoop/ozone/OzoneManagerVersion.java   | 21 +++++--
 .../apache/hadoop/ozone/upgrade/LayoutFeature.java |  8 +++
 .../hdds/TestComponentVersionInvariants.java       | 69 +++++++++++++++++++---
 .../hadoop/hdds/protocol/TestDatanodeDetails.java  |  6 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  4 +-
 .../transport/server/ratis/XceiverServerRatis.java |  5 +-
 .../hadoop/hdds/protocol/DiskBalancerProtocol.java |  2 +-
 .../container/common/helpers/MoveDataNodePair.java |  2 +-
 ...lockLocationProtocolClientSideTranslatorPB.java |  2 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |  2 +-
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |  2 +-
 ...inerLocationProtocolServerSideTranslatorPB.java |  4 +-
 .../hdds/scm/node/TestDatanodeUsageInfo.java       |  4 +-
 .../hdds/scm/pipeline/MockPipelineManager.java     |  4 +-
 .../TestPipelineDatanodesIntersection.java         |  2 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  6 +-
 .../scm/pipeline/TestPipelineStateManagerImpl.java | 40 ++++++-------
 .../scm/pipeline/TestRatisPipelineProvider.java    | 16 ++---
 .../scm/pipeline/TestSimplePipelineProvider.java   |  4 +-
 .../scm/server/TestSCMBlockProtocolServer.java     |  4 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  8 +--
 .../apache/hadoop/ozone/debug/VersionDebug.java    |  4 +-
 .../apache/hadoop/ozone/fsck/ContainerMapper.java  |  2 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  6 +-
 .../hadoop/ozone/client/rpc/TestRpcClient.java     | 50 ++++++++--------
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  2 +-
 .../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java |  2 +-
 .../hadoop/ozone/om/helpers/ServiceInfo.java       |  2 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |  6 +-
 ...doopRpcOMFollowerReadFailoverProxyProvider.java |  4 +-
 .../hadoop/ozone/om/helpers/TestOmKeyInfo.java     |  6 +-
 .../ozone/om/protocolPB/TestS3GrpcOmTransport.java | 12 ++--
 .../hadoop/ozone/freon/DatanodeSimulator.java      |  6 +-
 .../container/TestScmApplyTransactionFailure.java  |  4 +-
 .../client/rpc/TestBlockDataStreamOutput.java      |  6 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |  4 +-
 .../org/apache/hadoop/ozone/debug/TestLDBCli.java  |  2 +-
 .../TestContainerCommandReconciliation.java        |  8 +--
 ...stOzoneManagerHAFollowerReadWithAllRunning.java |  4 +-
 .../ozone/om/TestOzoneManagerHAWithAllRunning.java |  2 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  2 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  4 +-
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |  2 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |  4 +-
 .../om/request/validation/ValidationCondition.java |  2 +-
 .../om/request/validation/VersionExtractor.java    |  3 +-
 .../ozone/om/service/DirectoryDeletingService.java |  4 +-
 .../ozone/om/service/KeyDeletingService.java       |  2 +-
 .../ozone/om/service/OpenKeyCleanupService.java    |  2 +-
 .../ozone/om/service/SnapshotDeletingService.java  |  4 +-
 .../ozone/om/request/OMRequestTestUtils.java       |  7 ++-
 .../om/request/file/TestOMRecoverLeaseRequest.java |  2 +-
 .../TestOMDirectoriesPurgeRequestAndResponse.java  |  4 +-
 .../request/validation/TestRequestValidations.java |  4 +-
 .../request/validation/TestVersionExtractor.java   |  2 +-
 .../TestOMSnapshotMoveTableKeysResponse.java       |  4 +-
 .../om/service/TestSnapshotDeletingService.java    |  4 +-
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |  2 +-
 .../ozone/recon/scm/ReconPipelineManager.java      |  2 +-
 .../impl/StorageContainerServiceProviderImpl.java  |  2 +-
 67 files changed, 290 insertions(+), 193 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 31d6d794d07..a18ac8f86b8 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -285,8 +285,8 @@ private boolean allDataNodesSupportPiggybacking() {
     // that supports PutBlock piggybacking.
     for (DatanodeDetails dn : pipeline.getNodes()) {
       LOG.debug("dn = {}, version = {}", dn, dn.getCurrentVersion());
-      if (dn.getCurrentVersion() <
-              COMBINED_PUTBLOCK_WRITECHUNK_RPC.serialize()) {
+      if (!COMBINED_PUTBLOCK_WRITECHUNK_RPC.isSupportedBy(
+          dn.getCurrentVersion())) {
         return false;
       }
     }
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
index 99fb8b89806..fff8fc445a6 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
@@ -103,7 +103,7 @@ private boolean allDataNodesSupportStreamBlock(Pipeline 
pipeline) {
     // return true only if all DataNodes in the pipeline are on a version
     // that supports for reading a block by streaming chunks..
     for (DatanodeDetails dn : pipeline.getNodes()) {
-      if (dn.getCurrentVersion() < STREAM_BLOCK_SUPPORT.serialize()) {
+      if (!STREAM_BLOCK_SUPPORT.isSupportedBy(dn.getCurrentVersion())) {
         return false;
       }
     }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java
index b3e8c77572b..10f232ee0c9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java
@@ -24,6 +24,10 @@
  * Base type for component version enums.
  */
 public interface ComponentVersion {
+  /**
+   * @return The serialized representation of this version. This is an opaque 
value which should not be checked or
+   * compared directly.
+   */
   int serialize();
 
   /**
@@ -31,6 +35,13 @@ public interface ComponentVersion {
    */
   String description();
 
+  /**
+   * Deserializes a ComponentVersion and checks whether the version it 
represents supports the feature set of this ComponentVersion.
+   *
+   * @return true if the version represented by serializedVersion supports the 
features of this version. False otherwise.
+   */
+  boolean isSupportedBy(int serializedVersion);
+
   default Optional<? extends UpgradeAction> action() {
     return Optional.empty();
   }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HDDSVersion.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HDDSVersion.java
index 655083a41d9..6b4131e2226 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HDDSVersion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HDDSVersion.java
@@ -39,10 +39,9 @@ public enum HDDSVersion implements ComponentVersion {
   FUTURE_VERSION(-1, "Used internally in the client when the server side is "
       + " newer and an unknown server version has arrived to the client.");
 
-  public static final HDDSVersion CURRENT = latest();
-  public static final int CURRENT_VERSION = CURRENT.version;
+  public static final HDDSVersion SOFTWARE_VERSION = latest();
 
-  private static final Map<Integer, HDDSVersion> BY_PROTO_VALUE =
+  private static final Map<Integer, HDDSVersion> BY_VALUE =
       Arrays.stream(values())
           .collect(toMap(HDDSVersion::serialize, identity()));
 
@@ -64,17 +63,26 @@ public int serialize() {
     return version;
   }
 
+  public static HDDSVersion deserialize(int value) {
+    return BY_VALUE.getOrDefault(value, FUTURE_VERSION);
+  }
+
   @Override
-  public String toString() {
-    return name() + " (" + serialize() + ")";
+  public boolean isSupportedBy(int serializedVersion) {
+    // In order for the other serialized version to support this version's 
features,
+    // the other version must be equal or larger to this version.
+    return deserialize(serializedVersion).compareTo(this) >= 0;
   }
 
-  public static HDDSVersion fromProtoValue(int value) {
-    return BY_PROTO_VALUE.getOrDefault(value, FUTURE_VERSION);
+  @Override
+  public String toString() {
+    return name() + " (" + serialize() + ")";
   }
 
   private static HDDSVersion latest() {
     HDDSVersion[] versions = HDDSVersion.values();
+    // The last entry in the array will be `FUTURE_VERSION`. We want the entry 
prior to this which defines the latest
+    // version in the software.
     return versions[versions.length - 2];
   }
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index d1ebc130505..a8df2c2aa69 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -517,7 +517,7 @@ public static DatanodeDetails getFromProtoBuf(
    */
   @JsonIgnore
   public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
-    return toProto(ClientVersion.CURRENT_VERSION);
+    return toProto(ClientVersion.CURRENT.serialize());
   }
 
   public HddsProtos.DatanodeDetailsProto toProto(int clientVersion) {
@@ -572,8 +572,7 @@ public HddsProtos.DatanodeDetailsProto.Builder 
toProtoBuilder(
     builder.setPersistedOpStateExpiry(persistedOpStateExpiryEpochSec);
 
     final boolean handlesUnknownPorts =
-        ClientVersion.fromProtoValue(clientVersion)
-        .compareTo(VERSION_HANDLES_UNKNOWN_DN_PORTS) >= 0;
+        VERSION_HANDLES_UNKNOWN_DN_PORTS.isSupportedBy(clientVersion);
     final int requestedPortCount = filterPorts.size();
     final boolean maySkip = requestedPortCount > 0;
     for (Port port : ports) {
@@ -727,7 +726,7 @@ public static final class Builder {
     private HddsProtos.NodeOperationalState persistedOpState;
     private long persistedOpStateExpiryEpochSec = 0;
     private int initialVersion;
-    private int currentVersion = HDDSVersion.CURRENT_VERSION;
+    private int currentVersion = HDDSVersion.SOFTWARE_VERSION.serialize();
 
     /**
      * Default private constructor. To create Builder instance use
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index cff7d73dcd1..0a79e84e7ae 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -67,7 +67,7 @@ public final class Pipeline {
   private static final Codec<Pipeline> CODEC = new DelegatedCodec<>(
       Proto2Codec.get(HddsProtos.Pipeline.getDefaultInstance()),
       Pipeline::getFromProtobufSetCreationTimestamp,
-      p -> p.getProtobufMessage(ClientVersion.CURRENT_VERSION),
+      p -> p.getProtobufMessage(ClientVersion.CURRENT.serialize()),
       Pipeline.class,
       DelegatedCodec.CopyType.UNSUPPORTED);
 
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
index 8c4c1a2e10d..ac20b5fe3b7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java
@@ -21,7 +21,6 @@
 import static java.util.stream.Collectors.toMap;
 
 import java.util.Arrays;
-import java.util.Comparator;
 import java.util.Map;
 import org.apache.hadoop.hdds.ComponentVersion;
 
@@ -46,9 +45,8 @@ public enum ClientVersion implements ComponentVersion {
       + " unknown client version has arrived from the client.");
 
   public static final ClientVersion CURRENT = latest();
-  public static final int CURRENT_VERSION = CURRENT.version;
 
-  private static final Map<Integer, ClientVersion> BY_PROTO_VALUE =
+  private static final Map<Integer, ClientVersion> BY_VALUE =
       Arrays.stream(values())
           .collect(toMap(ClientVersion::serialize, identity()));
 
@@ -70,18 +68,26 @@ public int serialize() {
     return version;
   }
 
+  public static ClientVersion deserialize(int value) {
+    return BY_VALUE.getOrDefault(value, FUTURE_VERSION);
+  }
+
   @Override
-  public String toString() {
-    return name() + " (" + serialize() + ")";
+  public boolean isSupportedBy(int serializedVersion) {
+    // In order for the other serialized version to support this version's 
features,
+    // the other version must be equal or larger to this version.
+    return deserialize(serializedVersion).compareTo(this) >= 0;
   }
 
-  public static ClientVersion fromProtoValue(int value) {
-    return BY_PROTO_VALUE.getOrDefault(value, FUTURE_VERSION);
+  @Override
+  public String toString() {
+    return name() + " (" + serialize() + ")";
   }
 
   private static ClientVersion latest() {
-    return Arrays.stream(ClientVersion.values())
-        
.max(Comparator.comparingInt(ComponentVersion::serialize)).orElse(null);
+    ClientVersion[] versions = ClientVersion.values();
+    // The last entry in the array will be `FUTURE_VERSION`. We want the entry 
prior to this which defines the latest
+    // version in the software.
+    return versions[versions.length - 2];
   }
-
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
index e2946ce8e19..9ec8870855e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
@@ -58,9 +58,9 @@ public enum OzoneManagerVersion implements ComponentVersion {
   FUTURE_VERSION(-1, "Used internally in the client when the server side is "
       + " newer and an unknown server version has arrived to the client.");
 
-  public static final OzoneManagerVersion CURRENT = latest();
+  public static final OzoneManagerVersion SOFTWARE_VERSION = latest();
 
-  private static final Map<Integer, OzoneManagerVersion> BY_PROTO_VALUE =
+  private static final Map<Integer, OzoneManagerVersion> BY_VALUE =
       Arrays.stream(values())
           .collect(toMap(OzoneManagerVersion::serialize, identity()));
 
@@ -82,17 +82,26 @@ public int serialize() {
     return version;
   }
 
+  public static OzoneManagerVersion deserialize(int value) {
+    return BY_VALUE.getOrDefault(value, FUTURE_VERSION);
+  }
+
   @Override
-  public String toString() {
-    return name() + " (" + serialize() + ")";
+  public boolean isSupportedBy(int serializedVersion) {
+    // In order for the other serialized version to support this version's 
features,
+    // the other version must be equal or larger to this version.
+    return deserialize(serializedVersion).compareTo(this) >= 0;
   }
 
-  public static OzoneManagerVersion fromProtoValue(int value) {
-    return BY_PROTO_VALUE.getOrDefault(value, FUTURE_VERSION);
+  @Override
+  public String toString() {
+    return name() + " (" + serialize() + ")";
   }
 
   private static OzoneManagerVersion latest() {
     OzoneManagerVersion[] versions = OzoneManagerVersion.values();
+    // The last entry in the array will be `FUTURE_VERSION`. We want the entry 
prior to this which defines the latest
+    // version in the software.
     return versions[versions.length - 2];
   }
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
index 7f7374c8137..848a35104e5 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java
@@ -29,4 +29,12 @@ public interface LayoutFeature extends ComponentVersion {
   default int serialize() {
     return this.layoutVersion();
   }
+
+  @Override
+  default boolean isSupportedBy(int serializedVersion) {
+    // In order for the other serialized version to support this version's 
features,
+    // the other version must be equal or larger to this version.
+    // We can compare the values directly since there is no FUTURE_VERSION for 
layout features.
+    return serializedVersion >= layoutVersion();
+  }
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
index f0b1df91940..0edab7e76d2 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdds;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 import java.util.stream.Stream;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneManagerVersion;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -53,8 +55,7 @@ public static Stream<Arguments> values() {
   // FUTURE_VERSION is the latest
   @ParameterizedTest
   @MethodSource("values")
-  public void testFutureVersionHasTheHighestOrdinal(
-      ComponentVersion[] values, ComponentVersion defaultValue,
+  public void testFutureVersionHasTheHighestOrdinal(ComponentVersion[] values, 
ComponentVersion defaultValue,
       ComponentVersion futureValue) {
 
     assertEquals(values[values.length - 1], futureValue);
@@ -63,8 +64,7 @@ public void testFutureVersionHasTheHighestOrdinal(
   // FUTURE_VERSION's internal version id is -1
   @ParameterizedTest
   @MethodSource("values")
-  public void testFuturVersionHasMinusOneAsProtoRepresentation(
-      ComponentVersion[] values, ComponentVersion defaultValue,
+  public void testFutureVersionSerializesToMinusOne(ComponentVersion[] values, 
ComponentVersion defaultValue,
       ComponentVersion futureValue) {
     assertEquals(-1, futureValue.serialize());
 
@@ -73,8 +73,7 @@ public void testFuturVersionHasMinusOneAsProtoRepresentation(
   // DEFAULT_VERSION's internal version id is 0
   @ParameterizedTest
   @MethodSource("values")
-  public void testDefaultVersionHasZeroAsProtoRepresentation(
-      ComponentVersion[] values, ComponentVersion defaultValue,
+  public void testDefaultVersionSerializesToZero(ComponentVersion[] values, 
ComponentVersion defaultValue,
       ComponentVersion futureValue) {
     assertEquals(0, defaultValue.serialize());
   }
@@ -82,8 +81,7 @@ public void testDefaultVersionHasZeroAsProtoRepresentation(
   // versions are increasing monotonically by one
   @ParameterizedTest
   @MethodSource("values")
-  public void testAssignedProtoRepresentations(
-      ComponentVersion[] values, ComponentVersion defaultValue,
+  public void testSerializedValuesAreMonotonic(ComponentVersion[] values, 
ComponentVersion defaultValue,
       ComponentVersion futureValue) {
     int startValue = defaultValue.serialize();
     // we skip the future version at the last position
@@ -92,4 +90,59 @@ public void testAssignedProtoRepresentations(
     }
     assertEquals(values.length, ++startValue);
   }
+
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testVersionIsSupportedByItself(ComponentVersion[] values, 
ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
+    for (ComponentVersion value : values) {
+      assertTrue(value.isSupportedBy(value.serialize()));
+    }
+  }
+
+  @ParameterizedTest
+  @MethodSource("values")
+  public void 
testOnlyEqualOrHigherVersionsCanSupportAFeature(ComponentVersion[] values, 
ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
+    int knownVersionCount = values.length - 1;
+    for (int featureIndex = 0; featureIndex < knownVersionCount; 
featureIndex++) {
+      ComponentVersion requiredFeature = values[featureIndex];
+      for (int providerIndex = 0; providerIndex < knownVersionCount; 
providerIndex++) {
+        ComponentVersion provider = values[providerIndex];
+        boolean expected = providerIndex >= featureIndex;
+        assertEquals(expected, 
requiredFeature.isSupportedBy(provider.serialize()));
+      }
+    }
+  }
+
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testFutureVersionSupportsAllKnownVersions(ComponentVersion[] 
values, ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
+    int unknownFutureVersion = Integer.MAX_VALUE;
+    for (ComponentVersion requiredFeature : values) {
+      assertTrue(requiredFeature.isSupportedBy(unknownFutureVersion));
+    }
+  }
+
+  @Test
+  public void testHDDSVersionSerDes() {
+    for (HDDSVersion version: HDDSVersion.values()) {
+      assertEquals(version, HDDSVersion.deserialize(version.serialize()));
+    }
+  }
+
+  @Test
+  public void testOMVersionSerDes() {
+    for (OzoneManagerVersion version: OzoneManagerVersion.values()) {
+      assertEquals(version, 
OzoneManagerVersion.deserialize(version.serialize()));
+    }
+  }
+
+  @Test
+  public void testClientVersionSerDes() {
+    for (ClientVersion version: ClientVersion.values()) {
+      assertEquals(version, ClientVersion.deserialize(version.serialize()));
+    }
+  }
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
index f3a091c46d6..0f592c71d63 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
@@ -77,13 +77,15 @@ public void testNewBuilderCurrentVersion() {
         dn.toProtoBuilder(DEFAULT_VERSION.serialize(), requiredPorts);
     protoBuilder.clearCurrentVersion();
     DatanodeDetails dn2 = 
DatanodeDetails.newBuilder(protoBuilder.build()).build();
-    assertEquals(HDDSVersion.SEPARATE_RATIS_PORTS_AVAILABLE.serialize(), 
dn2.getCurrentVersion());
+    assertEquals(HDDSVersion.SEPARATE_RATIS_PORTS_AVAILABLE,
+        HDDSVersion.deserialize(dn2.getCurrentVersion()));
 
     // test that if the current version is set, it is used
     protoBuilder =
         dn.toProtoBuilder(DEFAULT_VERSION.serialize(), requiredPorts);
     DatanodeDetails dn3 = 
DatanodeDetails.newBuilder(protoBuilder.build()).build();
-    assertEquals(HDDSVersion.CURRENT.serialize(), dn3.getCurrentVersion());
+    assertEquals(HDDSVersion.SOFTWARE_VERSION,
+        HDDSVersion.deserialize(dn3.getCurrentVersion()));
   }
 
   public static void assertPorts(HddsProtos.DatanodeDetailsProto dn,
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index aca713d1104..269af34b48a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -816,13 +816,13 @@ private String reconfigScmNodes(String value) {
    * Returns the initial version of the datanode.
    */
   private int getInitialVersion() {
-    return conf.getInt(TESTING_DATANODE_VERSION_INITIAL, 
HDDSVersion.CURRENT_VERSION);
+    return conf.getInt(TESTING_DATANODE_VERSION_INITIAL, 
HDDSVersion.SOFTWARE_VERSION.serialize());
   }
 
   /**
    * Returns the current version of the datanode.
    */
   private int getCurrentVersion() {
-    return conf.getInt(TESTING_DATANODE_VERSION_CURRENT, 
HDDSVersion.CURRENT_VERSION);
+    return conf.getInt(TESTING_DATANODE_VERSION_CURRENT, 
HDDSVersion.SOFTWARE_VERSION.serialize());
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index b41d086f742..02389ab60fa 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -52,7 +52,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.hdds.HDDSVersion;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -216,8 +215,8 @@ private void assignPorts() {
         OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
         OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
 
-    if (HDDSVersion.fromProtoValue(datanodeDetails.getInitialVersion())
-        .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) {
+    if (SEPARATE_RATIS_PORTS_AVAILABLE.isSupportedBy(
+        datanodeDetails.getInitialVersion())) {
       adminPort = determinePort(
           OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT,
           OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT);
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/DiskBalancerProtocol.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/DiskBalancerProtocol.java
index b3efb18a616..cd89604ffe4 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/DiskBalancerProtocol.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/DiskBalancerProtocol.java
@@ -38,7 +38,7 @@ public interface DiskBalancerProtocol extends Closeable {
   long VERSIONID = 1L;
 
   GetDiskBalancerInfoRequestProto DEFAULT_GET_DISK_BALANCER_INFO_REQUEST
-      = 
GetDiskBalancerInfoRequestProto.newBuilder().setClientVersion(ClientVersion.CURRENT_VERSION).build();
+      = 
GetDiskBalancerInfoRequestProto.newBuilder().setClientVersion(ClientVersion.CURRENT.serialize()).build();
 
   @Idempotent
   default DatanodeDiskBalancerInfoProto getDiskBalancerInfo() throws 
IOException {
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
index 38f45092dff..bc43c2a5422 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
@@ -35,7 +35,7 @@ public class MoveDataNodePair {
   private static final Codec<MoveDataNodePair> CODEC = new DelegatedCodec<>(
       Proto2Codec.get(MoveDataNodePairProto.getDefaultInstance()),
       MoveDataNodePair::getFromProtobuf,
-      pair -> pair.getProtobufMessage(ClientVersion.CURRENT_VERSION),
+      pair -> pair.getProtobufMessage(ClientVersion.CURRENT.serialize()),
       MoveDataNodePair.class,
       DelegatedCodec.CopyType.SHALLOW);
 
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index ae4d58fd18f..13e19cc7bf6 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -125,7 +125,7 @@ public ScmBlockLocationProtocolClientSideTranslatorPB(
   private SCMBlockLocationRequest.Builder createSCMBlockRequest(Type cmdType) {
     return SCMBlockLocationRequest.newBuilder()
         .setCmdType(cmdType)
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setTraceID(TracingUtil.exportCurrentSpan());
   }
 
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 94b2230e68b..b079936baa1 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -188,7 +188,7 @@ private ScmContainerLocationResponse submitRequest(
     try {
       Builder builder = ScmContainerLocationRequest.newBuilder()
           .setCmdType(type)
-          .setVersion(ClientVersion.CURRENT_VERSION)
+          .setVersion(ClientVersion.CURRENT.serialize())
           .setTraceID(TracingUtil.exportCurrentSpan());
       builderConsumer.accept(builder);
       ScmContainerLocationRequest wrapper = builder.build();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index bf33282aaf4..8ffa79c8eb3 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -269,7 +269,7 @@ private void checkIfPipelineCreationIsAllowed(
   private void addPipelineToManager(Pipeline pipeline)
       throws IOException {
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     acquireWriteLock();
     try {
       stateManager.addPipeline(pipelineProto);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index acb7dc7d252..176de420c2c 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -220,8 +220,8 @@ public ScmContainerLocationResponse submitRequest(RpcController controller,
     // this server interface, this should be removed and solved via new
     // annotated interceptors.
     boolean checkResponseForECRepConfig = false;
-    if (request.getVersion() <
-        ClientVersion.ERASURE_CODING_SUPPORT.serialize()) {
+    if (!ClientVersion.ERASURE_CODING_SUPPORT.isSupportedBy(
+        request.getVersion())) {
       if (request.getCmdType() == GetContainer
           || request.getCmdType() == ListContainer
           || request.getCmdType() == GetContainerWithPipeline
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
index 60eb21850ec..142e2638cf8 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
@@ -41,7 +41,7 @@ void testToProtoDoesNotIncludeFilesystemFieldsByDefault() {
     );
 
     DatanodeUsageInfo info = new DatanodeUsageInfo(dn, stat);
-    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT_VERSION);
+    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT.serialize());
 
     assertThat(proto.hasFsCapacity()).isFalse();
     assertThat(proto.hasFsAvailable()).isFalse();
@@ -59,7 +59,7 @@ void testToProtoIncludesFilesystemFieldsWhenPresent() {
     DatanodeUsageInfo info = new DatanodeUsageInfo(dn, stat);
     info.setFilesystemUsage(2000L, 1500L);
 
-    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT_VERSION);
+    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT.serialize());
 
     assertThat(proto.hasFsCapacity()).isTrue();
     assertThat(proto.hasFsAvailable()).isTrue();
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
index dbe551fcda7..cbbd52d3c5a 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -85,7 +85,7 @@ public Pipeline createPipeline(ReplicationConfig replicationConfig,
     }
 
     stateManager.addPipeline(pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION));
+        ClientVersion.CURRENT.serialize()));
     return pipeline;
   }
 
@@ -109,7 +109,7 @@ public Pipeline buildECPipeline(ReplicationConfig replicationConfig,
   public void addEcPipeline(Pipeline pipeline)
       throws IOException {
     stateManager.addPipeline(pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION));
+        ClientVersion.CURRENT.serialize()));
   }
 
   @Override
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 586718766b2..7847da628f0 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -110,7 +110,7 @@ public void testPipelineDatanodesIntersection(int nodeCount,
         Pipeline pipeline = provider.create(RatisReplicationConfig.getInstance(
             ReplicationFactor.THREE));
         HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-            ClientVersion.CURRENT_VERSION);
+            ClientVersion.CURRENT.serialize());
         stateManager.addPipeline(pipelineProto);
         nodeManager.addPipeline(pipeline);
         List<Pipeline> overlapPipelines = RatisPipelineUtils
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 7094a39c79e..91ac0fb935b 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -291,7 +291,7 @@ public void testPickLowestLoadAnchor() throws IOException, TimeoutException {
             .setNodes(nodes)
             .build();
         HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-            ClientVersion.CURRENT_VERSION);
+            ClientVersion.CURRENT.serialize());
         nodeManager.addPipeline(pipeline);
         stateManager.addPipeline(pipelineProto);
       } catch (SCMException e) {
@@ -648,7 +648,7 @@ private void insertHeavyNodesIntoNodeManager(
               .build();
 
           pipelineProto = pipeline.getProtobufMessage(
-              ClientVersion.CURRENT_VERSION);
+              ClientVersion.CURRENT.serialize());
           nodeManager.addPipeline(pipeline);
           stateManager.addPipeline(pipelineProto);
           pipelineCount++;
@@ -791,7 +791,7 @@ private void createPipelineWithReplicationConfig(List<DatanodeDetails> dnList,
         .build();
 
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     nodeManager.addPipeline(pipeline);
     stateManager.addPipeline(pipelineProto);
   }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
index 460d08eb60d..fc447a74e91 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
@@ -110,14 +110,14 @@ private Pipeline 
createDummyPipeline(HddsProtos.ReplicationType type,
   public void testAddAndGetPipeline() throws IOException, TimeoutException {
     Exception e = assertThrows(SCMException.class,
         () -> stateManager.addPipeline(createDummyPipeline(0)
-            .getProtobufMessage(ClientVersion.CURRENT_VERSION)));
+            .getProtobufMessage(ClientVersion.CURRENT.serialize())));
     // replication factor and number of nodes in the pipeline do not match
     assertThat(e.getMessage()).contains("do not match");
 
     // add a pipeline
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
 
     try {
       stateManager.addPipeline(pipelineProto);
@@ -144,11 +144,11 @@ public void testGetPipelines() throws IOException, 
TimeoutException {
 
     Set<HddsProtos.Pipeline> pipelines = new HashSet<>();
     HddsProtos.Pipeline pipeline = createDummyPipeline(1).getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipeline);
     pipelines.add(pipeline);
     pipeline = createDummyPipeline(1).getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipeline);
     pipelines.add(pipeline);
 
@@ -179,19 +179,19 @@ public void testGetPipelinesByTypeAndFactor()
           // 5 pipelines in allocated state for each type and factor
           HddsProtos.Pipeline pipeline =
               createDummyPipeline(type, factor, factor.getNumber())
-                  .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+                  .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           pipelines.add(pipeline);
 
           // 5 pipelines in open state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber())
-              .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+              .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           pipelines.add(pipeline);
 
           // 5 pipelines in closed state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber())
-              .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+              .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           pipelines.add(pipeline);
         }
@@ -232,20 +232,20 @@ public void testGetPipelinesByTypeFactorAndState()
           // 5 pipelines in allocated state for each type and factor
           HddsProtos.Pipeline pipeline =
               createDummyPipeline(type, factor, factor.getNumber())
-                  .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+                  .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           pipelines.add(pipeline);
 
           // 5 pipelines in open state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber())
-              .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+              .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           openPipeline(pipeline);
           pipelines.add(pipeline);
 
           // 5 pipelines in dormant state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber())
-              .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+              .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           openPipeline(pipeline);
           deactivatePipeline(pipeline);
@@ -253,7 +253,7 @@ public void testGetPipelinesByTypeFactorAndState()
 
           // 5 pipelines in closed state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber())
-              .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+              .getProtobufMessage(ClientVersion.CURRENT.serialize());
           stateManager.addPipeline(pipeline);
           finalizePipeline(pipeline);
           pipelines.add(pipeline);
@@ -292,7 +292,7 @@ public void testAddAndGetContainer() throws IOException, 
TimeoutException {
     long containerID = 0;
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     pipeline = stateManager.getPipeline(pipeline.getId());
     stateManager.addContainerToPipeline(pipeline.getId(),
@@ -325,7 +325,7 @@ public void testAddAndGetContainer() throws IOException, 
TimeoutException {
   public void testRemovePipeline() throws IOException, TimeoutException {
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     // close the pipeline
     openPipeline(pipelineProto);
@@ -347,7 +347,7 @@ public void testRemoveContainer() throws IOException, 
TimeoutException {
     long containerID = 1;
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     // create an open pipeline in stateMap
     stateManager.addPipeline(pipelineProto);
     openPipeline(pipelineProto);
@@ -387,7 +387,7 @@ public void testRemoveContainer() throws IOException, 
TimeoutException {
   public void testFinalizePipeline() throws IOException, TimeoutException {
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     // finalize on ALLOCATED pipeline
     finalizePipeline(pipelineProto);
@@ -398,7 +398,7 @@ public void testFinalizePipeline() throws IOException, 
TimeoutException {
 
     pipeline = createDummyPipeline(1);
     pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     openPipeline(pipelineProto);
     // finalize on OPEN pipeline
@@ -410,7 +410,7 @@ public void testFinalizePipeline() throws IOException, 
TimeoutException {
 
     pipeline = createDummyPipeline(1);
     pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     openPipeline(pipelineProto);
     finalizePipeline(pipelineProto);
@@ -426,7 +426,7 @@ public void testFinalizePipeline() throws IOException, 
TimeoutException {
   public void testOpenPipeline() throws IOException, TimeoutException {
     Pipeline pipeline = createDummyPipeline(1);
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     // open on ALLOCATED pipeline
     openPipeline(pipelineProto);
@@ -448,7 +448,7 @@ public void testQueryPipeline() throws IOException, 
TimeoutException {
         HddsProtos.ReplicationFactor.THREE, 3);
     // pipeline in allocated state should not be reported
     HddsProtos.Pipeline pipelineProto = pipeline
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     assertEquals(0, stateManager
         .getPipelines(RatisReplicationConfig
@@ -470,7 +470,7 @@ public void testQueryPipeline() throws IOException, 
TimeoutException {
         .setState(Pipeline.PipelineState.OPEN)
         .build();
     HddsProtos.Pipeline pipelineProto2 = pipeline2
-        .getProtobufMessage(ClientVersion.CURRENT_VERSION);
+        .getProtobufMessage(ClientVersion.CURRENT.serialize());
     // pipeline in open state should be reported
     stateManager.addPipeline(pipelineProto2);
     assertEquals(2, stateManager
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index ca9d1f5a6c3..10a30482f65 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -136,14 +136,14 @@ private void createPipelineAndAssertions(
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     nodeManager.addPipeline(pipeline);
 
     Pipeline pipeline1 = provider.create(RatisReplicationConfig
         .getInstance(factor));
     HddsProtos.Pipeline pipelineProto1 = pipeline1.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     assertPipelineProperties(pipeline1, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     // New pipeline should not overlap with the previous created pipeline
@@ -185,7 +185,7 @@ public void testCreatePipelineWithFactor() throws Exception 
{
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
 
     factor = HddsProtos.ReplicationFactor.ONE;
@@ -194,7 +194,7 @@ public void testCreatePipelineWithFactor() throws Exception 
{
     assertPipelineProperties(pipeline1, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     HddsProtos.Pipeline pipelineProto1 = pipeline1.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto1);
     // With enough pipeline quote on datanodes, they should not share
     // the same set of datanodes.
@@ -274,7 +274,7 @@ public void testCreatePipelinesDnExclude() throws Exception 
{
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     nodeManager.addPipeline(pipeline);
     stateManager.addPipeline(pipelineProto);
 
@@ -401,7 +401,7 @@ public void testCreatePipelineWithDefaultLimit() throws 
Exception {
       Pipeline p = provider.create(
           RatisReplicationConfig.getInstance(ReplicationFactor.THREE),
           new ArrayList<>(), new ArrayList<>());
-      stateManager.addPipeline(p.getProtobufMessage(ClientVersion.CURRENT_VERSION));
+      stateManager.addPipeline(p.getProtobufMessage(ClientVersion.CURRENT.serialize()));
     }
 
     // Next pipeline creation should fail with default limit message.
@@ -426,7 +426,7 @@ public void 
testCreatePipelineThrowErrorWithDataNodeLimit(int limit, int pipelin
     for (int i = 0; i < pipelineCount; i++) {
      stateManager.addPipeline(
          provider.create(RatisReplicationConfig.getInstance(ReplicationFactor.THREE),
-              new ArrayList<>(), new ArrayList<>()).getProtobufMessage(ClientVersion.CURRENT_VERSION)
+              new ArrayList<>(), new ArrayList<>()).getProtobufMessage(ClientVersion.CURRENT.serialize())
       );
     }
 
@@ -453,7 +453,7 @@ private void addPipeline(
         .setId(PipelineID.randomId())
         .build();
     HddsProtos.Pipeline pipelineProto = openPipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
 
     stateManager.addPipeline(pipelineProto);
     nodeManager.addPipeline(openPipeline);
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index 338d9129512..07a064a2b7d 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -81,7 +81,7 @@ public void testCreatePipelineWithFactor() throws Exception {
     Pipeline pipeline =
         provider.create(StandaloneReplicationConfig.getInstance(factor));
     HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto);
     assertEquals(pipeline.getType(), HddsProtos.ReplicationType.STAND_ALONE);
     assertEquals(pipeline.getReplicationConfig().getRequiredNodes(), factor.getNumber());
@@ -92,7 +92,7 @@ public void testCreatePipelineWithFactor() throws Exception {
     Pipeline pipeline1 =
         provider.create(StandaloneReplicationConfig.getInstance(factor));
     HddsProtos.Pipeline pipelineProto1 = pipeline1.getProtobufMessage(
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     stateManager.addPipeline(pipelineProto1);
     assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.STAND_ALONE);
     assertEquals(
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 895baef27d6..0fe156448ee 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -274,7 +274,7 @@ public void testSortDatanodes() throws Exception {
             .setClient(client)
             .build();
     ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp =
-        service.sortDatanodes(request, ClientVersion.CURRENT_VERSION);
+        service.sortDatanodes(request, ClientVersion.CURRENT.serialize());
     assertEquals(NODE_COUNT, resp.getNodeList().size());
     System.out.println("client = " + client);
     resp.getNodeList().stream().forEach(
@@ -290,7 +290,7 @@ public void testSortDatanodes() throws Exception {
         .addAllNodeNetworkName(nodes)
         .setClient(client)
         .build();
-    resp = service.sortDatanodes(request, ClientVersion.CURRENT_VERSION);
+    resp = service.sortDatanodes(request, ClientVersion.CURRENT.serialize());
     System.out.println("client = " + client);
     assertEquals(0, resp.getNodeList().size());
     resp.getNodeList().stream().forEach(
diff --git 
a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
 
b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 61c0f4150c3..6e1df27b27b 100644
--- 
a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ 
b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -238,7 +238,7 @@ public List<HddsProtos.Node> queryNode(
       HddsProtos.QueryScope queryScope, String poolName)
       throws IOException {
     return storageContainerLocationClient.queryNode(opState, nodeState,
-        queryScope, poolName, ClientVersion.CURRENT_VERSION);
+        queryScope, poolName, ClientVersion.CURRENT.serialize());
   }
 
   @Override
@@ -437,7 +437,7 @@ public ContainerWithPipeline getContainerWithPipeline(long 
containerId)
   public List<ContainerReplicaInfo> getContainerReplicas(long containerId) throws IOException {
     List<HddsProtos.SCMContainerReplicaProto> protos =
         storageContainerLocationClient.getContainerReplicas(containerId,
-            ClientVersion.CURRENT_VERSION);
+            ClientVersion.CURRENT.serialize());
     List<ContainerReplicaInfo> replicas = new ArrayList<>();
     for (HddsProtos.SCMContainerReplicaProto p : protos) {
       replicas.add(ContainerReplicaInfo.fromProto(p));
@@ -558,14 +558,14 @@ public DeletedBlocksTransactionSummary getDeletedBlockSummary() throws IOExcepti
   public List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
       String address, String uuid) throws IOException {
     return storageContainerLocationClient.getDatanodeUsageInfo(address,
-        uuid, ClientVersion.CURRENT_VERSION);
+        uuid, ClientVersion.CURRENT.serialize());
   }
 
   @Override
   public List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
       boolean mostUsed, int count) throws IOException {
     return storageContainerLocationClient.getDatanodeUsageInfo(mostUsed, count,
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
   }
 
   @Override
diff --git 
a/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java
 
b/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java
index 20cf91d0872..87cf936deb3 100644
--- 
a/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java
+++ 
b/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java
@@ -52,8 +52,8 @@ public Void call() throws IOException {
         ),
         "components", ImmutableSortedMap.of(
             "client", asMap(ClientVersion.CURRENT),
-            "datanode", asMap(HDDSVersion.CURRENT),
-            "om", asMap(OzoneManagerVersion.CURRENT)
+            "datanode", asMap(HDDSVersion.SOFTWARE_VERSION),
+            "om", asMap(OzoneManagerVersion.SOFTWARE_VERSION)
         )
     )));
     return null;
diff --git 
a/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
 
b/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
index f023f7d094d..294b06f0c02 100644
--- 
a/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
+++ 
b/hadoop-ozone/cli-debug/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
@@ -88,7 +88,7 @@ public static void main(String[] args) throws IOException {
                 keyValueTableIterator.next();
             OmKeyInfo omKeyInfo = keyValue.getValue();
             byte[] value = omKeyInfo
-                .getProtobuf(true, ClientVersion.CURRENT_VERSION)
+                .getProtobuf(true, ClientVersion.CURRENT.serialize())
                 .toByteArray();
             OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(
                 OzoneManagerProtocolProtos.KeyInfo.parseFrom(value));
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 3947e4b6818..53f3dda10cd 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -344,11 +344,11 @@ public XceiverClientFactory getXceiverClientManager() {
   }
 
   public static OzoneManagerVersion getOmVersion(ServiceInfoEx info) {
-    OzoneManagerVersion version = OzoneManagerVersion.CURRENT;
+    OzoneManagerVersion version = OzoneManagerVersion.SOFTWARE_VERSION;
     for (ServiceInfo si : info.getServiceInfoList()) {
       if (si.getNodeType() == HddsProtos.NodeType.OM) {
         OzoneManagerVersion current =
-            OzoneManagerVersion.fromProtoValue(si.getProtobuf().getOMVersion());
+            OzoneManagerVersion.deserialize(si.getProtobuf().getOMVersion());
         if (version.compareTo(current) > 0) {
           version = current;
         }
@@ -376,7 +376,7 @@ static boolean validateOmVersion(OzoneManagerVersion 
minimumVersion,
       if (s.getNodeType() == HddsProtos.NodeType.OM) {
         OzoneManagerVersion omv =
             OzoneManagerVersion
-                .fromProtoValue(s.getProtobuf().getOMVersion());
+                .deserialize(s.getProtobuf().getOMVersion());
         if (minimumVersion.compareTo(omv) > 0) {
           return false;
         } else {
diff --git 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestRpcClient.java
 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestRpcClient.java
index 4e4efef51e1..b6b501f6774 100644
--- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestRpcClient.java
+++ 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestRpcClient.java
@@ -42,23 +42,23 @@ private enum ValidateOmVersionTestCases {
         true), // Should validation pass
     NULL_EXPECTED_ONE_OM(
         null,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         null,
         true),
     NULL_EXPECTED_TWO_OM(
         null,
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         true),
     NULL_EXPECTED_ONE_DEFAULT_ONE_CURRENT_OM(
         null,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
         true
     ),
     NULL_EXPECTED_ONE_CURRENT_ONE_FUTURE_OM(
         null,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         true
     ),
@@ -81,7 +81,7 @@ private enum ValidateOmVersionTestCases {
         true),
     DEFAULT_EXPECTED_ONE_CURRENT_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         null,
         true),
     DEFAULT_EXPECTED_ONE_FUTURE_OM(
@@ -96,8 +96,8 @@ private enum ValidateOmVersionTestCases {
         true),
     DEFAULT_EXPECTED_TWO_CURRENT_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         true),
     DEFAULT_EXPECTED_TWO_FUTURE_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
@@ -107,7 +107,7 @@ private enum ValidateOmVersionTestCases {
     DEFAULT_EXPECTED_ONE_DEFAULT_ONE_CURRENT_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         true),
     DEFAULT_EXPECTED_ONE_DEFAULT_ONE_FUTURE_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
@@ -116,58 +116,58 @@ private enum ValidateOmVersionTestCases {
         true),
     DEFAULT_EXPECTED_ONE_CURRENT_ONE_FUTURE_OM(
         OzoneManagerVersion.DEFAULT_VERSION,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         true),
 
     CURRENT_EXPECTED_NO_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         null,
         null,
         false),
     CURRENT_EXPECTED_ONE_DEFAULT_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
         null,
         false),
     CURRENT_EXPECTED_ONE_CURRENT_OM(
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         null,
         true),
     CURRENT_EXPECTED_ONE_FUTURE_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         null,
         true),
     CURRENT_EXPECTED_TWO_DEFAULT_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
         false),
     CURRENT_EXPECTED_TWO_CURRENT_OM(
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         true),
     CURRENT_EXPECTED_TWO_FUTURE_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         true),
     CURRENT_EXPECTED_ONE_DEFAULT_ONE_CURRENT_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         false),
     CURRENT_EXPECTED_ONE_DEFAULT_ONE_FUTURE_OM(
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.DEFAULT_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         false),
     CURRENT_EXPECTED_ONE_CURRENT_ONE_FUTURE_OM(
-        OzoneManagerVersion.CURRENT,
-        OzoneManagerVersion.CURRENT,
+        OzoneManagerVersion.SOFTWARE_VERSION,
+        OzoneManagerVersion.SOFTWARE_VERSION,
         OzoneManagerVersion.FUTURE_VERSION,
         true);
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index b0e26c49d69..aaaacfa64c0 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -135,7 +135,7 @@ private static Codec<OmKeyInfo> newCodec(boolean 
ignorePipeline) {
     return new DelegatedCodec<>(
         Proto2Codec.get(KeyInfo.getDefaultInstance()),
         OmKeyInfo::getFromProtobuf,
-        k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION),
+        k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT.serialize()),
         OmKeyInfo.class);
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
index 0f10832114e..2b882c55b7b 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -55,7 +55,7 @@ private static Codec<RepeatedOmKeyInfo> newCodec(boolean 
ignorePipeline) {
     return new DelegatedCodec<>(
         Proto2Codec.get(RepeatedKeyInfo.getDefaultInstance()),
         RepeatedOmKeyInfo::getFromProto,
-        k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION),
+        k -> k.getProto(ignorePipeline, ClientVersion.CURRENT.serialize()),
         RepeatedOmKeyInfo.class);
   }
 
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
index f5d8f24e3fd..e549b763441 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -212,7 +212,7 @@ public static ServiceInfo getFromProtobuf(
     return new ServiceInfo(serviceInfo.getNodeType(),
         serviceInfo.getHostname(),
         serviceInfo.getServicePortsList(),
-        OzoneManagerVersion.fromProtoValue(serviceInfo.getOMVersion()),
+        OzoneManagerVersion.deserialize(serviceInfo.getOMVersion()),
         serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null,
         serviceInfo.hasServerDefaults() ? 
OzoneFsServerDefaults.getFromProtobuf(
             serviceInfo.getServerDefaults()) : null);
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 8e1f036d4f2..2d17a968cc4 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -299,7 +299,7 @@ public void close() throws IOException {
   private OMRequest.Builder createOMRequest(Type cmdType) {
     return OMRequest.newBuilder()
         .setCmdType(cmdType)
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setClientId(clientID);
   }
 
@@ -843,7 +843,7 @@ private void updateKey(OmKeyArgs args, long clientId, 
boolean hsync, boolean rec
         .addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata()))
         .addAllKeyLocations(locationInfoList.stream()
             // TODO use OM version?
-            .map(info -> info.getProtobuf(ClientVersion.CURRENT_VERSION))
+            .map(info -> info.getProtobuf(ClientVersion.CURRENT.serialize()))
             .collect(Collectors.toList()));
 
     setReplicationConfig(args.getReplicationConfig(), keyArgsBuilder);
@@ -1681,7 +1681,7 @@ public OmMultipartCommitUploadPartInfo 
commitMultipartUploadPart(
         .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata()))
         .addAllKeyLocations(locationInfoList.stream()
             // TODO use OM version?
-            .map(info -> info.getProtobuf(ClientVersion.CURRENT_VERSION))
+            .map(info -> info.getProtobuf(ClientVersion.CURRENT.serialize()))
             .collect(Collectors.toList()));
     multipartCommitUploadPartRequest.setClientID(clientId);
     multipartCommitUploadPartRequest.setKeyArgs(keyArgs.build());
diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestHadoopRpcOMFollowerReadFailoverProxyProvider.java
 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestHadoopRpcOMFollowerReadFailoverProxyProvider.java
index 3587ca5dacb..34025234fa9 100644
--- 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestHadoopRpcOMFollowerReadFailoverProxyProvider.java
+++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestHadoopRpcOMFollowerReadFailoverProxyProvider.java
@@ -447,7 +447,7 @@ private static void doWrite(OzoneManagerProtocolPB client) 
throws Exception {
     req.setKeyArgs(keyArgs);
 
     OMRequest omRequest = OMRequest.newBuilder()
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setClientId(ClientId.randomId().toString())
         .setCmdType(Type.CreateKey)
         .setCreateKeyRequest(req)
@@ -466,7 +466,7 @@ private static void doRead(OzoneManagerProtocolPB client) 
throws Exception {
         .setKeyArgs(keyArgs);
 
     OMRequest omRequest = OMRequest.newBuilder()
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setClientId(ClientId.randomId().toString())
         .setCmdType(Type.GetKeyInfo)
         .setGetKeyInfoRequest(req)
diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
index 285853a3a76..bc8928f9c4c 100644
--- 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
@@ -60,7 +60,7 @@ public void protobufConversion() throws IOException {
         RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
 
     OmKeyInfo keyAfterSerialization = OmKeyInfo.getFromProtobuf(
-        key.getProtobuf(ClientVersion.CURRENT_VERSION));
+        key.getProtobuf(ClientVersion.CURRENT.serialize()));
 
     assertNotNull(keyAfterSerialization);
     assertEquals(key, keyAfterSerialization);
@@ -78,7 +78,7 @@ public void getProtobufMessageEC() throws IOException {
     OmKeyInfo key = createOmKeyInfo(
         RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
     OzoneManagerProtocolProtos.KeyInfo omKeyProto =
-        key.getProtobuf(ClientVersion.CURRENT_VERSION);
+        key.getProtobuf(ClientVersion.CURRENT.serialize());
 
     // No EC Config
     assertFalse(omKeyProto.hasEcReplicationConfig());
@@ -95,7 +95,7 @@ public void getProtobufMessageEC() throws IOException {
     // EC Config
     key = createOmKeyInfo(new ECReplicationConfig(3, 2));
     assertFalse(key.isHsync());
-    omKeyProto = key.getProtobuf(ClientVersion.CURRENT_VERSION);
+    omKeyProto = key.getProtobuf(ClientVersion.CURRENT.serialize());
 
     assertEquals(3,
         omKeyProto.getEcReplicationConfig().getData());
diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 176d9b6d03b..9393a167f23 100644
--- 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.om.protocolPB;
 
-import static org.apache.hadoop.ozone.ClientVersion.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.ClientVersion.CURRENT;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
 import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -156,7 +156,7 @@ public void testSubmitRequestToServer() throws Exception {
 
     final OMRequest omRequest = OMRequest.newBuilder()
         .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
+        .setVersion(CURRENT.serialize())
         .setClientId("test")
         .setServiceListRequest(req)
         .build();
@@ -176,7 +176,7 @@ public void testGrpcFailoverProxy() throws Exception {
 
     final OMRequest omRequest = OMRequest.newBuilder()
         .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
+        .setVersion(CURRENT.serialize())
         .setClientId("test")
         .setServiceListRequest(req)
         .build();
@@ -201,7 +201,7 @@ public void testGrpcFailoverProxyExhaustRetry() throws 
Exception {
 
     final OMRequest omRequest = OMRequest.newBuilder()
         .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
+        .setVersion(CURRENT.serialize())
         .setClientId("test")
         .setServiceListRequest(req)
         .build();
@@ -248,7 +248,7 @@ public void testGrpcFailoverExceedMaxMesgLen() throws 
Exception {
 
     final OMRequest omRequest = OMRequest.newBuilder()
         .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
+        .setVersion(CURRENT.serialize())
         .setClientId("test")
         .setServiceListRequest(req)
         .build();
@@ -276,7 +276,7 @@ private static OMRequest arbitraryOmRequest() {
     ServiceListRequest req = ServiceListRequest.newBuilder().build();
     return OMRequest.newBuilder()
         .setCmdType(Type.ServiceList)
-        .setVersion(CURRENT_VERSION)
+        .setVersion(CURRENT.serialize())
         .setClientId("test")
         .setServiceListRequest(req)
         .build();
diff --git 
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
 
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
index fb4023932ee..bee91d46659 100644
--- 
a/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
+++ 
b/hadoop-ozone/freon/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
@@ -460,8 +460,8 @@ private DatanodeDetails 
randomDatanodeDetails(ConfigurationSource config)
     DatanodeDetails details = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID())
         .build();
-    details.setInitialVersion(HDDSVersion.CURRENT_VERSION);
-    details.setCurrentVersion(HDDSVersion.CURRENT_VERSION);
+    details.setInitialVersion(HDDSVersion.SOFTWARE_VERSION.serialize());
+    details.setCurrentVersion(HDDSVersion.SOFTWARE_VERSION.serialize());
     details.setHostName(HddsUtils.getHostName(config));
     details.setIpAddress(randomIp());
     details.setStandalonePort(0);
@@ -470,7 +470,7 @@ private DatanodeDetails 
randomDatanodeDetails(ConfigurationSource config)
     details.setVersion(HDDS_VERSION_INFO.getVersion());
     details.setSetupTime(Time.now());
     details.setRevision(HDDS_VERSION_INFO.getRevision());
-    details.setCurrentVersion(HDDSVersion.CURRENT_VERSION);
+    details.setCurrentVersion(HDDSVersion.SOFTWARE_VERSION.serialize());
     return details;
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
index cef98beaf0e..687201e0a14 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import static org.apache.hadoop.ozone.ClientVersion.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.ClientVersion.CURRENT;
 import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -94,7 +94,7 @@ public void testAddDuplicatePipelineId()
         replication, PipelineState.OPEN).get(0);
 
     HddsProtos.Pipeline pipelineToCreate =
-        existing.getProtobufMessage(CURRENT_VERSION);
+        existing.getProtobufMessage(CURRENT.serialize());
     Throwable ex = assertThrows(SCMException.class,
         () -> pipelineManager.getStateManager().addPipeline(
             pipelineToCreate));
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index d90bf3ea803..4a57e96b7cc 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -341,7 +341,8 @@ public void testDatanodeVersion(boolean flushDelay) throws 
Exception {
       List<HddsDatanodeService> dns = cluster.getHddsDatanodes();
       for (HddsDatanodeService dn : dns) {
         DatanodeDetails details = dn.getDatanodeDetails();
-        assertEquals(DN_OLD_VERSION.serialize(), details.getCurrentVersion());
+        assertEquals(DN_OLD_VERSION,
+            HDDSVersion.deserialize(details.getCurrentVersion()));
       }
 
       String keyName = getKeyName();
@@ -352,7 +353,8 @@ public void testDatanodeVersion(boolean flushDelay) throws 
Exception {
       // Now check 3 DNs in a random pipeline returns the correct DN versions
       List<DatanodeDetails> streamDnDetails = stream.getPipeline().getNodes();
       for (DatanodeDetails details : streamDnDetails) {
-        assertEquals(DN_OLD_VERSION.serialize(), details.getCurrentVersion());
+        assertEquals(DN_OLD_VERSION,
+            HDDSVersion.deserialize(details.getCurrentVersion()));
       }
     }
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index 773ea96c3c7..985e16b10ab 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -374,7 +374,7 @@ public void testS3Auth() throws Exception {
 
     OMRequest writeRequest = OMRequest.newBuilder()
         .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setClientId(UUID.randomUUID().toString())
         .setCreateVolumeRequest(CreateVolumeRequest.newBuilder().
             setVolumeInfo(VolumeInfo.newBuilder().setVolume(volumeName)
@@ -396,7 +396,7 @@ public void testS3Auth() throws Exception {
     // Read Request
     OMRequest readRequest = OMRequest.newBuilder()
         .setCmdType(OzoneManagerProtocolProtos.Type.InfoVolume)
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setClientId(UUID.randomUUID().toString())
         .setInfoVolumeRequest(InfoVolumeRequest.newBuilder()
             .setVolumeName(volumeName).build())
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
index aa6e026eff2..94b160e5c8c 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
@@ -500,7 +500,7 @@ private void prepareKeyTable(int recordsCount) throws 
IOException {
       OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1",
           key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE,
               HddsProtos.ReplicationFactor.ONE)).build();
-      keyTable.put(key.getBytes(UTF_8), 
value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray());
+      keyTable.put(key.getBytes(UTF_8), 
value.getProtobuf(ClientVersion.CURRENT.serialize()).toByteArray());
       // Populate map
       dbMap.put(key, toMap(value));
     }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
index b632b87a90b..6c467a3ff7f 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/checksum/TestContainerCommandReconciliation.java
@@ -512,7 +512,7 @@ public void testDataChecksumReportedAtSCM() throws 
Exception {
     // Check non-zero checksum after container close
     StorageContainerLocationProtocolClientSideTranslatorPB scmClient = 
cluster.getStorageContainerLocationClient();
     List<HddsProtos.SCMContainerReplicaProto> containerReplicas = 
scmClient.getContainerReplicas(containerID,
-        ClientVersion.CURRENT_VERSION);
+        ClientVersion.CURRENT.serialize());
     assertEquals(3, containerReplicas.size());
     for (HddsProtos.SCMContainerReplicaProto containerReplica: 
containerReplicas) {
       assertNotEquals(0, containerReplica.getDataChecksum());
@@ -546,7 +546,7 @@ public void testDataChecksumReportedAtSCM() throws 
Exception {
     scmClient.reconcileContainer(containerID);
     waitForDataChecksumsAtSCM(containerID, 1);
     // Check non-zero checksum after container reconciliation
-    containerReplicas = scmClient.getContainerReplicas(containerID, 
ClientVersion.CURRENT_VERSION);
+    containerReplicas = scmClient.getContainerReplicas(containerID, 
ClientVersion.CURRENT.serialize());
     assertEquals(3, containerReplicas.size());
     for (HddsProtos.SCMContainerReplicaProto containerReplica: 
containerReplicas) {
       assertNotEquals(0, containerReplica.getDataChecksum());
@@ -560,7 +560,7 @@ public void testDataChecksumReportedAtSCM() throws 
Exception {
     }
     cluster.waitForClusterToBeReady();
     waitForDataChecksumsAtSCM(containerID, 1);
-    containerReplicas = scmClient.getContainerReplicas(containerID, 
ClientVersion.CURRENT_VERSION);
+    containerReplicas = scmClient.getContainerReplicas(containerID, 
ClientVersion.CURRENT.serialize());
     assertEquals(3, containerReplicas.size());
     for (HddsProtos.SCMContainerReplicaProto containerReplica: 
containerReplicas) {
       assertNotEquals(0, containerReplica.getDataChecksum());
@@ -572,7 +572,7 @@ private void waitForDataChecksumsAtSCM(long containerID, 
int expectedSize) throw
     GenericTestUtils.waitFor(() -> {
       try {
         Set<Long> dataChecksums = 
cluster.getStorageContainerLocationClient().getContainerReplicas(containerID,
-                ClientVersion.CURRENT_VERSION).stream()
+                ClientVersion.CURRENT.serialize()).stream()
             .map(HddsProtos.SCMContainerReplicaProto::getDataChecksum)
             .collect(Collectors.toSet());
         LOG.info("Waiting for {} total unique checksums from container {} to 
be reported to SCM. Currently {} unique" +
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAFollowerReadWithAllRunning.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAFollowerReadWithAllRunning.java
index 1d1d47c7250..e416d661866 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAFollowerReadWithAllRunning.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAFollowerReadWithAllRunning.java
@@ -168,7 +168,7 @@ public void testFailoverWithSuggestedLeader() throws 
Exception {
         OzoneManagerProtocolProtos.OMRequest.newBuilder()
             .setCmdType(Type.CreateVolume)
             .setCreateVolumeRequest(req)
-            .setVersion(ClientVersion.CURRENT_VERSION)
+            .setVersion(ClientVersion.CURRENT.serialize())
             .setClientId(randomUUID().toString())
             .build();
 
@@ -456,7 +456,7 @@ void testOMResponseLeaderOmNodeId() throws Exception {
         OzoneManagerProtocolProtos.OMRequest.newBuilder()
             .setCmdType(Type.ListVolume)
             .setListVolumeRequest(req)
-            .setVersion(ClientVersion.CURRENT_VERSION)
+            .setVersion(ClientVersion.CURRENT.serialize())
             .setClientId(randomUUID().toString())
             .build();
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
index 8636fe0c24e..607d0032e80 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
@@ -340,7 +340,7 @@ public void testFailoverWithSuggestedLeader() throws 
Exception {
     OzoneManagerProtocolProtos.OMRequest readRequest =
         OzoneManagerProtocolProtos.OMRequest.newBuilder()
             .setCmdType(OzoneManagerProtocolProtos.Type.ListVolume)
-            .setVersion(ClientVersion.CURRENT_VERSION)
+            .setVersion(ClientVersion.CURRENT.serialize())
             .setClientId(randomUUID().toString())
             .build();
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 9dc01e54f99..e209ae71789 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -1468,7 +1468,7 @@ public ExpiredOpenKeys getExpiredOpenKeys(Duration 
expireThreshold,
                 .map(OmKeyLocationInfoGroup::getLocationList)
                 .map(Collection::stream)
                 .orElseGet(Stream::empty)
-                .map(loc -> loc.getProtobuf(ClientVersion.CURRENT_VERSION))
+                .map(loc -> loc.getProtobuf(ClientVersion.CURRENT.serialize()))
                 .forEach(keyArgs::addKeyLocations);
 
             OzoneManagerProtocolClientSideTranslatorPB.setReplicationConfig(
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 7a9d66f86df..08634010e44 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -3258,7 +3258,7 @@ public List<ServiceInfo> getServiceList() throws 
IOException {
     ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder()
         .setNodeType(HddsProtos.NodeType.OM)
         .setHostname(omRpcAddress.getHostName())
-        .setOmVersion(OzoneManagerVersion.CURRENT)
+        .setOmVersion(OzoneManagerVersion.SOFTWARE_VERSION)
         .addServicePort(ServicePort.newBuilder()
             .setType(ServicePort.Type.RPC)
             .setValue(omRpcAddress.getPort())
@@ -3322,7 +3322,7 @@ public List<ServiceInfo> getServiceList() throws 
IOException {
           // For now assume peer is at the same version.
           // This field needs to be fetched from peer when rolling upgrades
           // are implemented.
-          .setOmVersion(OzoneManagerVersion.CURRENT)
+          .setOmVersion(OzoneManagerVersion.SOFTWARE_VERSION)
           .addServicePort(ServicePort.newBuilder()
               .setType(ServicePort.Type.RPC)
               .setValue(peerNode.getRpcPort())
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index f794fae7a77..3938394d869 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -435,7 +435,7 @@ List<String> listKeys(String volumeName, String bucketName, 
String startKey,
        createOMRequest(OzoneManagerProtocolProtos.Type cmdType) throws 
IOException {
     return OzoneManagerProtocolProtos.OMRequest.newBuilder()
         .setClientId(CLIENT_ID.toString())
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setUserInfo(getUserInfo())
         .setCmdType(cmdType);
   }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 718f329aaaf..2049f05ae68 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -481,8 +481,8 @@ public static OMRequest 
handleCreateBucketWithBucketLayoutDuringPreFinalize(
   )
   public static OMRequest setDefaultBucketLayoutForOlderClients(OMRequest req,
       ValidationContext ctx) {
-    if (ClientVersion.fromProtoValue(req.getVersion())
-        .compareTo(ClientVersion.BUCKET_LAYOUT_SUPPORT) < 0) {
+    if (!ClientVersion.BUCKET_LAYOUT_SUPPORT.isSupportedBy(
+        req.getVersion())) {
       // Older client will default bucket layout to LEGACY to
       // make its operations backward compatible.
       return changeBucketLayout(req, BucketLayout.LEGACY);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidationCondition.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidationCondition.java
index 87f2188ea1e..9b89597c94c 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidationCondition.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidationCondition.java
@@ -48,7 +48,7 @@ public boolean shouldApply(OMRequest req, ValidationContext 
ctx) {
   OLDER_CLIENT_REQUESTS {
     @Override
     public boolean shouldApply(OMRequest req, ValidationContext ctx) {
-      return req.getVersion() < ClientVersion.CURRENT_VERSION;
+      return !ClientVersion.CURRENT.isSupportedBy(req.getVersion());
     }
   };
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java
index f7b90bd060d..4b9a8de5266 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java
@@ -49,8 +49,7 @@ public Class<? extends Annotation> getValidatorClass() {
   CLIENT_VERSION_EXTRACTOR {
     @Override
     public ComponentVersion extractVersion(OMRequest req, ValidationContext 
ctx) {
-      return req.getVersion() > ClientVersion.CURRENT_VERSION ?
-          ClientVersion.FUTURE_VERSION : 
ClientVersion.fromProtoValue(req.getVersion());
+      return ClientVersion.deserialize(req.getVersion());
     }
 
     @Override
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
index e99b3626960..4f184f49011 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
@@ -461,14 +461,14 @@ private OzoneManagerProtocolProtos.PurgePathRequest 
wrapPurgeRequest(
 
     for (OmKeyInfo purgeFile : purgeDeletedFiles) {
       purgePathsRequest.addDeletedSubFiles(
-          purgeFile.getProtobuf(true, ClientVersion.CURRENT_VERSION));
+          purgeFile.getProtobuf(true, ClientVersion.CURRENT.serialize()));
     }
 
     // Add these directories to deletedDirTable, so that its sub-paths will be
     // traversed in next iteration to ensure cleanup all sub-children.
     for (OmKeyInfo dir : markDirsAsDeleted) {
       purgePathsRequest.addMarkDeletedSubDirs(
-          dir.getProtobuf(ClientVersion.CURRENT_VERSION));
+          dir.getProtobuf(ClientVersion.CURRENT.serialize()));
     }
 
     return purgePathsRequest.build();
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
index d7797d20869..8e92821cd54 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
@@ -331,7 +331,7 @@ private Pair<Pair<Integer, Long>, Boolean> 
submitPurgeKeysRequest(
         keyToUpdate.setKey(keyToModify.getKey());
         List<OzoneManagerProtocolProtos.KeyInfo> keyInfos =
             keyToModify.getValue().getOmKeyInfoList().stream()
-                .map(k -> k.getProtobuf(ClientVersion.CURRENT_VERSION))
+                .map(k -> k.getProtobuf(ClientVersion.CURRENT.serialize()))
                 .collect(Collectors.toList());
         keyToUpdate.addAllKeyInfos(keyInfos);
         keyToUpdate.setBucketId(keyToModify.getValue().getBucketId());
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
index d4f97b23eeb..ea2f7f1cde0 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
@@ -254,7 +254,7 @@ private OMRequest createCommitKeyRequest(
           .setCmdType(Type.CommitKey)
           .setCommitKeyRequest(request)
           .setClientId(clientId.toString())
-          .setVersion(ClientVersion.CURRENT_VERSION)
+          .setVersion(ClientVersion.CURRENT.serialize())
           .build();
     }
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java
index d7db018e0f5..9669b862bd3 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java
@@ -196,14 +196,14 @@ public BackgroundTaskResult call() throws 
InterruptedException {
               for (Table.KeyValue<String, List<OmKeyInfo>> deletedEntry : 
deletedKeyEntries) {
                 
deletedKeys.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedEntry.getKey())
                     .addAllKeyInfos(deletedEntry.getValue()
-                        .stream().map(val -> 
val.getProtobuf(ClientVersion.CURRENT_VERSION))
+                        .stream().map(val -> 
val.getProtobuf(ClientVersion.CURRENT.serialize()))
                         .collect(Collectors.toList())).build());
               }
 
               // Convert deletedDirEntries to SnapshotMoveKeyInfos.
               for (Table.KeyValue<String, OmKeyInfo> deletedDirEntry : 
deletedDirEntries) {
                 
deletedDirs.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedDirEntry.getKey())
-                    
.addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build());
+                    
.addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT.serialize())).build());
               }
 
               // Convert renamedEntries to KeyValue.
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index c7e80f166ae..74fbc8093ba 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -780,7 +780,7 @@ public static OMRequest.Builder newCreateBucketRequest(
     req.setBucketInfo(bucketInfo);
     return OMRequest.newBuilder()
         .setCreateBucketRequest(req)
-        .setVersion(ClientVersion.CURRENT_VERSION)
+        .setVersion(ClientVersion.CURRENT.serialize())
         .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
         .setClientId(UUID.randomUUID().toString());
   }
@@ -1374,7 +1374,8 @@ public static OMRequest moveSnapshotTableKeyRequest(UUID 
snapshotId,
               .setKey(deletedKey.getKey())
               .addAllKeyInfos(
                   deletedKey.getValue().stream()
-                  .map(omKeyInfo -> 
omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)).collect(Collectors.toList()))
+                      .map(omKeyInfo -> 
omKeyInfo.getProtobuf(ClientVersion.CURRENT.serialize()))
+                      .collect(Collectors.toList()))
               .build();
       deletedMoveKeys.add(snapshotMoveKeyInfos);
     }
@@ -1386,7 +1387,7 @@ public static OMRequest moveSnapshotTableKeyRequest(UUID 
snapshotId,
               .setKey(deletedKey.getKey())
               .addAllKeyInfos(
                   deletedKey.getValue().stream()
-                      .map(omKeyInfo -> 
omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION))
+                      .map(omKeyInfo -> 
omKeyInfo.getProtobuf(ClientVersion.CURRENT.serialize()))
                       .collect(Collectors.toList()))
               .build();
       deletedDirMoveKeys.add(snapshotMoveKeyInfos);
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
index 590d1250819..0ae3cb61f01 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java
@@ -377,7 +377,7 @@ private KeyArgs getNewKeyArgs(OmKeyInfo omKeyInfo, long 
deltaLength) throws IOEx
         .setDataSize(keyArgs.getDataSize())
         .addAllMetadata(KeyValueUtil.toProtobuf(keyArgs.getMetadata()))
         .addAllKeyLocations(locationInfoList.stream()
-            .map(info -> info.getProtobuf(ClientVersion.CURRENT_VERSION))
+            .map(info -> info.getProtobuf(ClientVersion.CURRENT.serialize()))
             .collect(Collectors.toList()));
     setReplicationConfig(keyArgs.getReplicationConfig(), keyArgsBuilder);
     return keyArgsBuilder.build();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
index 4692039ef0c..cbd68962d0c 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
@@ -207,14 +207,14 @@ private PurgePathRequest wrapPurgeRequest(
 
     for (OmKeyInfo purgeFile : purgeDeletedFiles) {
       purgePathsRequest.addDeletedSubFiles(
-          purgeFile.getProtobuf(true, ClientVersion.CURRENT_VERSION));
+          purgeFile.getProtobuf(true, ClientVersion.CURRENT.serialize()));
     }
 
     // Add these directories to deletedDirTable, so that its sub-paths will be
     // traversed in next iteration to ensure cleanup all sub-children.
     for (OmKeyInfo dir : markDirsAsDeleted) {
       purgePathsRequest.addMarkDeletedSubDirs(
-          dir.getProtobuf(ClientVersion.CURRENT_VERSION));
+          dir.getProtobuf(ClientVersion.CURRENT.serialize()));
     }
 
     return purgePathsRequest.build();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
index dca87f88733..75420d0474e 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
@@ -284,11 +284,11 @@ private RequestValidations 
loadEmptyValidations(ValidationContext ctx) {
   }
 
   private int olderClientVersion() {
-    return ClientVersion.CURRENT_VERSION - 1;
+    return ClientVersion.CURRENT.serialize() - 1;
   }
 
   private int currentClientVersion() {
-    return ClientVersion.CURRENT_VERSION;
+    return ClientVersion.CURRENT.serialize();
   }
 
   private OMRequest aCreateKeyRequest(int clientVersion) {
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java
index ba971e5bea3..bb842083484 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java
@@ -63,7 +63,7 @@ void testClientVersionExtractor(ClientVersion 
expectedClientVersion) {
   @ValueSource(ints = {1, 2, 5, 10, 1000, 10000})
   void testClientVersionExtractorForFutureValues(int futureVersion) {
     OMRequest request = mock(OMRequest.class);
-    when(request.getVersion()).thenReturn(ClientVersion.CURRENT_VERSION + 
futureVersion);
+    when(request.getVersion()).thenReturn(ClientVersion.CURRENT.serialize() + 
futureVersion);
     ComponentVersion version =
         VersionExtractor.CLIENT_VERSION_EXTRACTOR.extractVersion(request, 
null);
     assertEquals(ClientVersion.FUTURE_VERSION, version);
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
index 0c88e379e68..94219e7517c 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
@@ -147,13 +147,13 @@ public void testMoveTableKeysToNextSnapshot(boolean 
nextSnapshotExists) throws E
           .forEachRemaining(entry -> {
             
deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
                 
.addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> 
omKeyInfo.getProtobuf(
-                    
ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build());
+                    
ClientVersion.CURRENT.serialize())).collect(Collectors.toList())).build());
           });
 
       snapshot.getMetadataManager().getDeletedDirTable().iterator()
           .forEachRemaining(entry -> {
             
deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
-                
.addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build());
+                
.addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT.serialize())).build());
           });
       
snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry
 -> {
         
renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build());
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java
index c14596f891c..e1fa94a4d57 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java
@@ -223,7 +223,7 @@ private List<SnapshotMoveKeyInfos> 
createLargeDeletedKeys(int count) {
       SnapshotMoveKeyInfos moveKeyInfo = SnapshotMoveKeyInfos.newBuilder()
           .setKey(largeKeyName)
           .addAllKeyInfos(keyInfos.stream()
-              .map(k -> k.getProtobuf(ClientVersion.CURRENT_VERSION))
+              .map(k -> k.getProtobuf(ClientVersion.CURRENT.serialize()))
               .collect(Collectors.toList()))
           .build();
       deletedKeys.add(moveKeyInfo);
@@ -258,7 +258,7 @@ private List<SnapshotMoveKeyInfos> 
createLargeDeletedDirs(int count) {
       
       SnapshotMoveKeyInfos moveDirInfo = SnapshotMoveKeyInfos.newBuilder()
           .setKey(largeDirName)
-          .addKeyInfos(dirInfo.getProtobuf(ClientVersion.CURRENT_VERSION))
+          .addKeyInfos(dirInfo.getProtobuf(ClientVersion.CURRENT.serialize()))
           .build();
       deletedDirs.add(moveDirInfo);
     }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 6aacc779bea..201e0541ad9 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -357,7 +357,7 @@ private Response getDecommissionStatusResponse(String uuid, 
String ipAddress) th
     Response.ResponseBuilder builder = Response.status(Response.Status.OK);
     Map<String, Object> responseMap = new HashMap<>();
     Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
-        null, HddsProtos.QueryScope.CLUSTER, "", 
ClientVersion.CURRENT_VERSION).stream();
+        null, HddsProtos.QueryScope.CLUSTER, "", 
ClientVersion.CURRENT.serialize()).stream();
     List<HddsProtos.Node> decommissioningNodes =
         DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, 
ipAddress);
     String metricsJson = 
scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
index 3f45d61a37c..58df5a67530 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
@@ -166,7 +166,7 @@ public boolean addPipeline(Pipeline pipeline) throws 
IOException {
       if (containsPipeline(pipeline.getId())) {
         return false;
       }
-      
getStateManager().addPipeline(pipeline.getProtobufMessage(ClientVersion.CURRENT_VERSION));
+      
getStateManager().addPipeline(pipeline.getProtobufMessage(ClientVersion.CURRENT.serialize()));
       return true;
     } finally {
       releaseWriteLock();
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
index edd1c1f702b..10fa08216a6 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
@@ -113,7 +113,7 @@ public List<ContainerWithPipeline> 
getExistContainerWithPipelinesInBatch(
   @Override
   public List<HddsProtos.Node> getNodes() throws IOException {
     return scmClient.queryNode(null, null, HddsProtos.QueryScope.CLUSTER,
-        "", ClientVersion.CURRENT_VERSION);
+        "", ClientVersion.CURRENT.serialize());
   }
 
   @Override


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to