This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
     new c9fa9bd  HDDS-5874 EC: Integrate the Codec changes into EC Streams. 
(#2777)
c9fa9bd is described below

commit c9fa9bdf0f941c5ae7c2272e6df6b13a05ee15cf
Author: Uma Maheswara Rao G <[email protected]>
AuthorDate: Sat Nov 6 15:01:38 2021 -0700

    HDDS-5874 EC: Integrate the Codec changes into EC Streams. (#2777)
---
 .../hadoop/ozone/client/io/ECKeyOutputStream.java  | 28 +++------
 .../hadoop/ozone/client/TestOzoneECClient.java     | 73 +++-------------------
 .../ozone/client/rpc/TestECKeyOutputStream.java    |  1 -
 3 files changed, 14 insertions(+), 88 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
index 8fedde1..ed4b96e 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
@@ -25,20 +25,13 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.io.ElasticByteBufferPool;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.ErasureCodecOptions;
-import org.apache.hadoop.io.erasurecode.codec.RSErasureCodec;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -48,6 +41,8 @@ import 
org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.ozone.erasurecode.CodecRegistry;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -127,14 +122,9 @@ public class ECKeyOutputStream extends KeyOutputStream {
             unsafeByteBufferConversion, xceiverClientManager, handler.getId());
 
     this.writeOffset = 0;
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ECSchema schema =
-        new ECSchema(ecCodec.toString(), numDataBlks, numParityBlks);
-    ErasureCodecOptions options = new ErasureCodecOptions(schema);
-    RSErasureCodec codec = new RSErasureCodec(conf, options);
-    this.encoder = CodecUtil.createRawEncoder(conf,
-        SystemErasureCodingPolicies.getPolicies().get(1).getCodecName(),
-        codec.getCoderOptions());
+    this.encoder = CodecRegistry.getInstance()
+        .getCodecFactory(replicationConfig.getCodec().name().toLowerCase())
+        .createEncoder(replicationConfig);
   }
 
   /**
@@ -696,16 +686,12 @@ public class ECKeyOutputStream extends KeyOutputStream {
   private static class ECChunkBuffers {
     private final ByteBuffer[] dataBuffers;
     private final ByteBuffer[] parityBuffers;
-    private final int dataBlks;
-    private final int parityBlks;
     private int cellSize;
 
     ECChunkBuffers(int cellSize, int numData, int numParity) {
       this.cellSize = cellSize;
-      this.parityBlks = numParity;
-      this.dataBlks = numData;
-      dataBuffers = new ByteBuffer[this.dataBlks];
-      parityBuffers = new ByteBuffer[this.parityBlks];
+      dataBuffers = new ByteBuffer[numData];
+      parityBuffers = new ByteBuffer[numParity];
       allocateBuffers(cellSize, dataBuffers);
     }
 
diff --git 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
index 0b2a8d8..94e514d 100644
--- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
+++ 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
@@ -24,14 +24,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.ErasureCodecOptions;
-import org.apache.hadoop.io.erasurecode.codec.RSErasureCodec;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -39,6 +32,8 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.ozone.erasurecode.rawcoder.RSRawErasureCoderFactory;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.junit.After;
 import org.junit.Assert;
@@ -48,7 +43,6 @@ import org.junit.Test;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.cert.X509Certificate;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -75,21 +69,17 @@ public class TestOzoneECClient {
   private byte[][] inputChunks = new byte[dataBlocks][chunkSize];
   private final XceiverClientFactory factoryStub =
       new MockXceiverClientFactory();
-  private MockOmTransport transportStub = null;
-  private ECSchema schema = new ECSchema("rs", dataBlocks, parityBlocks);
-  private ErasureCodecOptions options = new ErasureCodecOptions(schema);
   private OzoneConfiguration conf = new OzoneConfiguration();
-  private RSErasureCodec codec = new RSErasureCodec(conf, options);
-  private final RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf,
-      SystemErasureCodingPolicies.getPolicies().get(1).getCodecName(),
-      codec.getCoderOptions());
+  private MockOmTransport transportStub = new MockOmTransport(
+      new MultiNodePipelineBlockAllocator(conf, dataBlocks + parityBlocks));
+  private final RawErasureEncoder encoder =
+      new RSRawErasureCoderFactory().createEncoder(
+          new ECReplicationConfig(dataBlocks, parityBlocks));
 
   @Before
   public void init() throws IOException {
     conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2,
         StorageUnit.KB);
-    transportStub = new MockOmTransport(
-        new MultiNodePipelineBlockAllocator(conf, dataBlocks + parityBlocks));
 
     client = new OzoneClient(conf, new RpcClient(conf, null) {
 
@@ -526,53 +516,4 @@ public class TestOzoneECClient {
     }
     return bucket;
   }
-
-  private void updatePipelineToKeepSingleNode(int keepingNodeIndex) {
-    Map<String, Map<String, Map<String, OzoneManagerProtocolProtos.KeyInfo>>>
-        keys = ((MockOmTransport) transportStub).getKeys();
-    Map<String, Map<String, OzoneManagerProtocolProtos.KeyInfo>> vol =
-        keys.get(keys.keySet().iterator().next());
-
-    Map<String, OzoneManagerProtocolProtos.KeyInfo> buck =
-        vol.get(vol.keySet().iterator().next());
-    OzoneManagerProtocolProtos.KeyInfo keyInfo =
-        buck.get(buck.keySet().iterator().next());
-    HddsProtos.Pipeline.Builder builder =
-        HddsProtos.Pipeline.newBuilder().setFactor(keyInfo.getFactor())
-            
.setType(keyInfo.getType()).setId(HddsProtos.PipelineID.newBuilder()
-            .setUuid128(HddsProtos.UUID.newBuilder().setLeastSigBits(1L)
-                .setMostSigBits(1L).build()).build());
-
-    // Keeping only the given position node in pipeline.
-    builder.addMembers(HddsProtos.DatanodeDetailsProto.newBuilder().setUuid128(
-        HddsProtos.UUID.newBuilder().setLeastSigBits(keepingNodeIndex)
-            .setMostSigBits(keepingNodeIndex).build()).setHostName("localhost")
-        .setIpAddress("1.2.3.4").addPorts(
-            HddsProtos.Port.newBuilder().setName("EC")
-                .setValue(1234 + keepingNodeIndex).build()).build());
-
-    HddsProtos.Pipeline pipeline = builder.build();
-    List<OzoneManagerProtocolProtos.KeyLocation> results = new ArrayList<>();
-    results.add(OzoneManagerProtocolProtos.KeyLocation.newBuilder()
-        .setPipeline(pipeline).setBlockID(
-            HddsProtos.BlockID.newBuilder().setBlockCommitSequenceId(1L)
-                .setContainerBlockID(
-                    HddsProtos.ContainerBlockID.newBuilder().setContainerID(1L)
-                        .setLocalID(0L).build()).build()).setOffset(0L)
-        .setLength(keyInfo.getDataSize()).build());
-
-    final OzoneManagerProtocolProtos.KeyInfo keyInfo1 =
-        OzoneManagerProtocolProtos.KeyInfo.newBuilder()
-            .setVolumeName(keyInfo.getVolumeName())
-            .setBucketName(keyInfo.getBucketName())
-            .setKeyName(keyInfo.getKeyName())
-            .setCreationTime(keyInfo.getCreationTime())
-            .setModificationTime(keyInfo.getModificationTime())
-            .setType(keyInfo.getType()).setFactor(keyInfo.getFactor())
-            .setDataSize(keyInfo.getDataSize()).setLatestVersion(0L)
-            .addKeyLocationList(
-                OzoneManagerProtocolProtos.KeyLocationList.newBuilder()
-                    .addAllKeyLocations(results)).build();
-    buck.put(keyInfo.getKeyName(), keyInfo1);
-  }
 }
\ No newline at end of file
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index 6f12b62..e8b3d11 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@ -69,7 +69,6 @@ public class TestECKeyOutputStream {
   private static String bucketName;
   private static String keyString;
   private static int dataBlocks = 3;
-  private static int parityBlocks = 2;
   private static int inputSize = dataBlocks * chunkSize;
   private static byte[][] inputChunks = new byte[dataBlocks][chunkSize];
 

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to the sender.