This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a05543b1b0 HDDS-7122. Add validation for EC chunk size (#4463)
a05543b1b0 is described below

commit a05543b1b0776396844a82994dc4cbee4765f0ef
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Fri Mar 24 16:54:52 2023 +0100

    HDDS-7122. Add validation for EC chunk size (#4463)
---
 .../hadoop/hdds/client/ECReplicationConfig.java    |  15 ++-
 .../hdds/client/ReplicationConfigValidator.java    |  52 +++++-----
 .../hadoop/hdds/client/TestReplicationConfig.java  |  10 +-
 .../client/TestReplicationConfigValidator.java     | 114 +++++++++++++++------
 .../hadoop/ozone/client/TestOzoneClient.java       |   5 +
 .../hadoop/ozone/client/TestOzoneECClient.java     |  41 +++++---
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |   6 +-
 .../hdds/scm/storage/TestContainerCommandsEC.java  |   5 +-
 .../ozone/client/rpc/TestECKeyOutputStream.java    |   2 +-
 .../ozone/container/TestECContainerRecovery.java   |   2 +-
 .../commandhandler/TestBlockDeletion.java          |   3 +-
 11 files changed, 166 insertions(+), 89 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
index 5dcabdd84f..883daf5866 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java
@@ -85,7 +85,7 @@ public class ECReplicationConfig implements ReplicationConfig {
    * Create an ECReplicationConfig object from a string representing the
    * various parameters. Acceptable patterns are like:
    *     rs-3-2-1024k
-   *     RS-3-2-2048
+   *     RS-3-2-2048k
    *     XOR-10-4-4096K
    * IllegalArgumentException will be thrown if the passed string does not
    * match the defined pattern.
@@ -95,7 +95,7 @@ public class ECReplicationConfig implements ReplicationConfig {
     final Matcher matcher = STRING_FORMAT.matcher(string);
     if (!matcher.matches()) {
       throw new IllegalArgumentException("EC replication config should be " +
-          "defined in the form rs-3-2-1024k, rs-6-3-1024; or rs-10-4-1024k." +
+          "defined in the form rs-3-2-1024k, rs-6-3-1024k; or rs-10-4-1024k." +
           " Provided configuration was: " + string);
     }
 
@@ -150,7 +150,7 @@ public class ECReplicationConfig implements ReplicationConfig {
     return getCodec() + EC_REPLICATION_PARAMS_DELIMITER
         + getData() + EC_REPLICATION_PARAMS_DELIMITER
         + getParity() + EC_REPLICATION_PARAMS_DELIMITER
-        + getEcChunkSize();
+        + chunkKB();
   }
 
   public HddsProtos.ECReplicationConfig toProto() {
@@ -199,11 +199,16 @@ public class ECReplicationConfig implements ReplicationConfig {
   @Override
   public String toString() {
     return HddsProtos.ReplicationType.EC + "{"
-        + codec + "-" + data + "-" + parity + "-" + ecChunkSize + "}";
+        + codec + "-" + data + "-" + parity + "-" + chunkKB() + "}";
   }
 
   @Override
   public String configFormat() {
-    return HddsProtos.ReplicationType.EC.name() + "/" + data + "-" + parity;
+    return HddsProtos.ReplicationType.EC.name()
+        + "/" + data + "-" + parity + "-" + chunkKB();
+  }
+
+  private String chunkKB() {
+    return ecChunkSize / 1024 + "k";
   }
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
index e690663912..b5bdd5f0f1 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfigValidator.java
@@ -22,8 +22,8 @@ import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigType;
 import org.apache.hadoop.hdds.conf.PostConstruct;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
+import java.util.Objects;
 import java.util.regex.Pattern;
 
 /**
@@ -33,46 +33,50 @@ import java.util.regex.Pattern;
 public class ReplicationConfigValidator {
 
   @Config(key = "allowed-configs",
-      defaultValue = "^((STANDALONE|RATIS)/(ONE|THREE))|(EC/(3-2|6-3|10-4))$",
+      defaultValue = "^((STANDALONE|RATIS)/(ONE|THREE))|"
+          + "(EC/(3-2|6-3|10-4)-(512|1024|2048|4096)k)"
+          + "$",
       type = ConfigType.STRING,
       description = "Regular expression to restrict enabled " +
           "replication schemes",
       tags = ConfigTag.STORAGE)
   private String validationPattern;
 
-  private Pattern validationRegexp;
+  private Pattern compiledValidationPattern;
+
+  public void disableValidation() {
+    setValidationPattern("");
+  }
+
+  public void setValidationPattern(String pattern) {
+    if (!Objects.equals(pattern, validationPattern)) {
+      validationPattern = pattern;
+      compilePattern();
+    }
+  }
 
   @PostConstruct
   public void init() {
+    compilePattern();
+  }
+
+  private void compilePattern() {
     if (validationPattern != null && !validationPattern.equals("")) {
-      validationRegexp = Pattern.compile(validationPattern);
+      compiledValidationPattern = Pattern.compile(validationPattern);
+    } else {
+      compiledValidationPattern = null;
     }
   }
 
   public ReplicationConfig validate(ReplicationConfig replicationConfig) {
-    if (validationRegexp == null) {
+    if (compiledValidationPattern == null) {
       return replicationConfig;
     }
-    if (!validationRegexp.matcher(
-            replicationConfig.configFormat()).matches()) {
-      String replication = replicationConfig.getReplication();
-      if (HddsProtos.ReplicationType.EC ==
-                replicationConfig.getReplicationType()) {
-        ECReplicationConfig ecConfig =
-              (ECReplicationConfig) replicationConfig;
-        replication =  ecConfig.getCodec() + "-" + ecConfig.getData() +
-                "-" + ecConfig.getParity() + "-{CHUNK_SIZE}";
-        //EC type checks data-parity
-        throw new IllegalArgumentException(
-                "Invalid data-parity replication config " +
-                        "for type " + replicationConfig.getReplicationType() +
-                        " and replication " + replication + "." +
-                        " Supported data-parity are 3-2,6-3,10-4");
-      }
-      //Non-EC type
+    String input = replicationConfig.configFormat();
+    if (!compiledValidationPattern.matcher(input).matches()) {
       throw new IllegalArgumentException("Invalid replication config " +
-              "for type " + replicationConfig.getReplicationType() +
-              " and replication " + replication);
+          input + ". Config must match the pattern defined by " +
+          "ozone.replication.allowed-configs (" + validationPattern + ")");
     }
     return replicationConfig;
   }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index ac0c7ab002..fa5e9e60ab 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -59,11 +59,11 @@ public class TestReplicationConfig {
   public static Stream<Arguments> ecType() {
     return Stream.of(
         arguments("RS", 3, 2, MB),
-        arguments("RS", 3, 2, KB),
+        arguments("RS", 3, 2, 2 * MB),
         arguments("RS", 6, 3, MB),
-        arguments("RS", 6, 3, KB),
+        arguments("RS", 6, 3, 2 * MB),
         arguments("RS", 10, 4, MB),
-        arguments("RS", 10, 4, KB)
+        arguments("RS", 10, 4, 2 * MB)
     );
   }
 
@@ -155,7 +155,7 @@ public class TestReplicationConfig {
         codec) + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
             + data + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
             + parity + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
-            + chunkSize, config.getReplication());
+            + chunkSize / 1024 + "k", config.getReplication());
   }
 
   @ParameterizedTest
@@ -391,7 +391,7 @@ public class TestReplicationConfig {
   private String ecDescriptor(String codec, int data, int parity,
       int chunkSize) {
     return codec.toUpperCase() + "-" + data + "-" + parity + "-" +
-        (chunkSize == MB ? "1024K" : "1024");
+        (chunkSize / 1024) + "k";
   }
 
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfigValidator.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfigValidator.java
index 8fa355835e..5b1dcf6ab7 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfigValidator.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfigValidator.java
@@ -17,59 +17,113 @@
  */
 package org.apache.hadoop.hdds.client;
 
+import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
 import org.apache.hadoop.hdds.conf.InMemoryConfiguration;
 import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
-import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ZERO;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 /**
  * Test ReplicationConfig validator.
  */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 class TestReplicationConfigValidator {
 
-  @Test
-  void testValidation() {
-    MutableConfigurationSource config = new InMemoryConfiguration();
+  private ReplicationConfigValidator defaultValidator;
 
-    final ReplicationConfigValidator validator =
-        config.getObject(ReplicationConfigValidator.class);
-    String ecConfig1 = "rs-3-2-1024k";
-    String ecConfig2 = "xor-6-3-2048k";
-    //Supported data-parity are 3-2,6-3,10-4
-    String invalidEcConfig1 = "xor-6-4-1024k";
+  private ReplicationConfigValidator disabledValidator;
 
-    validator.validate(RatisReplicationConfig.getInstance(THREE));
-    validator.validate(RatisReplicationConfig.getInstance(ONE));
-    validator.validate(StandaloneReplicationConfig.getInstance(THREE));
-    validator.validate(StandaloneReplicationConfig.getInstance(ONE));
-    validator.validate(new ECReplicationConfig(ecConfig1));
-    validator.validate(new ECReplicationConfig(ecConfig2));
-    try {
-      validator.validate(new ECReplicationConfig(invalidEcConfig1));
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains(
-              "Invalid data-parity replication " +
-          "config for type EC and replication xor-6-4-{CHUNK_SIZE}. " +
-                      "Supported data-parity are 3-2,6-3,10-4", ex);
+  @BeforeAll
+  void setup() {
+    defaultValidator = new InMemoryConfiguration()
+        .getObject(ReplicationConfigValidator.class);
+
+    MutableConfigurationSource disabled = new InMemoryConfiguration();
+    disabled.set("ozone.replication.allowed-configs", "");
+    disabledValidator = disabled
+        .getObject(ReplicationConfigValidator.class);
+  }
+
+  static List<String> validConfigsForEC() {
+    List<String> configs = new LinkedList<>();
+    for (EcCodec codec : EcCodec.values()) {
+      for (String dataParity : Arrays.asList("3-2", "6-3", "10-4")) {
+        String[] parts = dataParity.split("-");
+        int data = Integer.parseInt(parts[0]);
+        int parity = Integer.parseInt(parts[1]);
+        for (int chunkSize : Arrays.asList(512, 1024, 2048, 4096)) {
+          ReplicationConfig config =
+              new ECReplicationConfig(data, parity, codec, chunkSize * 1024);
+          configs.add(config.getReplication());
+        }
+      }
     }
+    return configs;
+  }
 
+  static String[] invalidConfigsForEC() {
+    return new String[]{
+        "rs-6-4-1024k", // invalid data-parity
+        "xor-3-2-1024", // invalid chunk size
+        "rs-6-3-1234k", // invalid chunk size
+        // invalid codec is always rejected by ECReplicationConfig
+    };
   }
 
   @Test
-  void testWithoutValidation() {
-    MutableConfigurationSource config = new InMemoryConfiguration();
-    config.set("ozone.replication.allowed-configs", "");
+  void acceptsRatis() {
+    defaultValidator.validate(RatisReplicationConfig.getInstance(THREE));
+    defaultValidator.validate(RatisReplicationConfig.getInstance(ONE));
+  }
 
-    final ReplicationConfigValidator validator =
-        config.getObject(ReplicationConfigValidator.class);
+  @Test
+  void acceptsStandalone() {
+    defaultValidator.validate(StandaloneReplicationConfig.getInstance(THREE));
+    defaultValidator.validate(StandaloneReplicationConfig.getInstance(ONE));
+  }
 
-    validator.validate(RatisReplicationConfig.getInstance(THREE));
-    validator.validate(StandaloneReplicationConfig.getInstance(ONE));
+  @ParameterizedTest
+  @MethodSource("validConfigsForEC")
+  void acceptsValidEC(String config) {
+    defaultValidator.validate(new ECReplicationConfig(config));
+  }
+
+  @ParameterizedTest
+  @MethodSource("invalidConfigsForEC")
+  void rejectsInvalidEC(String config) {
+    assertThrows(IllegalArgumentException.class,
+        () -> defaultValidator.validate(new ECReplicationConfig(config)));
+  }
 
+  @ParameterizedTest
+  @MethodSource("invalidConfigsForEC")
+  void disabledAcceptsInvalidEC(String config) {
+    disabledValidator.validate(new ECReplicationConfig(config));
+  }
+
+  @Test
+  void disabledAcceptsRatis() {
+    disabledValidator.validate(RatisReplicationConfig.getInstance(ONE));
+    disabledValidator.validate(RatisReplicationConfig.getInstance(THREE));
+  }
+
+  @Test
+  void disabledAcceptsStandalone() {
+    disabledValidator.validate(StandaloneReplicationConfig.getInstance(ONE));
+    disabledValidator.validate(StandaloneReplicationConfig.getInstance(THREE));
+    disabledValidator.validate(StandaloneReplicationConfig.getInstance(ZERO));
   }
 
   @Test
diff --git 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
index 68ff3534f9..c0804ffeac 100644
--- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
+++ 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -193,6 +194,10 @@ public class TestOzoneClient {
   public void testPutKeyWithECReplicationConfig() throws IOException {
     close();
     OzoneConfiguration config = new OzoneConfiguration();
+    ReplicationConfigValidator validator =
+        config.getObject(ReplicationConfigValidator.class);
+    validator.disableValidation();
+    config.setFromObject(validator);
     config.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2,
         StorageUnit.KB);
     int data = 3;
diff --git 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
index e86e70d70e..98e7b059db 100644
--- 
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
+++ 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -84,7 +85,7 @@ public class TestOzoneECClient {
   private byte[][] inputChunks = new byte[dataBlocks][chunkSize];
   private final XceiverClientFactory factoryStub =
       new MockXceiverClientFactory();
-  private OzoneConfiguration conf = new OzoneConfiguration();
+  private OzoneConfiguration conf = createConfiguration();
   private MultiNodePipelineBlockAllocator allocator =
       new MultiNodePipelineBlockAllocator(conf, dataBlocks + parityBlocks, 15);
   private final MockOmTransport transportStub = new MockOmTransport(allocator);
@@ -94,8 +95,6 @@ public class TestOzoneECClient {
 
   @Before
   public void init() throws IOException {
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2,
-        StorageUnit.KB);
     createNewClient(conf, transportStub);
   }
 
@@ -633,8 +632,7 @@ public class TestOzoneECClient {
 
   @Test
   public void testStripeWriteRetriesOn2Failures() throws Exception {
-    OzoneConfiguration con = new OzoneConfiguration();
-    con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2, StorageUnit.KB);
+    OzoneConfiguration con = createConfiguration();
     // Cluster has 15 nodes. So, first we will create 3 block groups with
     // distinct nodes in each. Block Group 1:  0-4, Block Group 2: 5-9, Block
     // Group 3: 10-14
@@ -656,8 +654,7 @@ public class TestOzoneECClient {
 
   @Test
   public void testStripeWriteRetriesOn3Failures() throws Exception {
-    OzoneConfiguration con = new OzoneConfiguration();
-    con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2, StorageUnit.KB);
+    OzoneConfiguration con = createConfiguration();
 
     int[] nodesIndexesToMarkFailure = new int[3];
     nodesIndexesToMarkFailure[0] = 0;
@@ -680,8 +677,7 @@ public class TestOzoneECClient {
   // nodes in allocateBlock request.
   @Test(expected = IllegalStateException.class)
   public void testStripeWriteRetriesOnAllNodeFailures() throws Exception {
-    OzoneConfiguration con = new OzoneConfiguration();
-    con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2, StorageUnit.KB);
+    OzoneConfiguration con = createConfiguration();
 
     // After writing first stripe, we will mark all nodes as bad in the cluster.
     int clusterSize = 5;
@@ -698,8 +694,7 @@ public class TestOzoneECClient {
   @Test
   public void testStripeWriteRetriesOn4FailuresWith3RetriesAllowed()
       throws Exception {
-    OzoneConfiguration con = new OzoneConfiguration();
-    con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2, StorageUnit.KB);
+    OzoneConfiguration con = createConfiguration();
     con.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES, 3);
 
     int[] nodesIndexesToMarkFailure = new int[4];
@@ -860,7 +855,7 @@ public class TestOzoneECClient {
   private void testExcludeFailedDN(IntStream failedDNIndex,
       IntStream closedDNIndex) throws Exception {
     close();
-    OzoneConfiguration con = new OzoneConfiguration();
+    OzoneConfiguration con = createConfiguration();
     MultiNodePipelineBlockAllocator blkAllocator =
         new MultiNodePipelineBlockAllocator(con, dataBlocks + parityBlocks, 10);
     createNewClient(con, blkAllocator);
@@ -915,7 +910,7 @@ public class TestOzoneECClient {
   public void testLargeWriteOfMultipleStripesWithStripeFailure()
       throws Exception {
     close();
-    OzoneConfiguration con = new OzoneConfiguration();
+    OzoneConfiguration con = createConfiguration();
     // block size of 3KB could hold 3 full stripes
     con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 3, StorageUnit.KB);
     con.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES, 3);
@@ -996,7 +991,7 @@ public class TestOzoneECClient {
   public void testPartialStripeWithPartialChunkRetry()
       throws IOException {
     close();
-    OzoneConfiguration con = new OzoneConfiguration();
+    OzoneConfiguration con = createConfiguration();
     // block size of 3KB could hold 3 full stripes
     con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 3, StorageUnit.KB);
     con.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES, 3);
@@ -1062,10 +1057,8 @@ public class TestOzoneECClient {
   public void testDiscardPreAllocatedBlocksPreventRetryExceeds()
       throws Exception {
     close();
-    OzoneConfiguration con = new OzoneConfiguration();
+    OzoneConfiguration con = createConfiguration();
     int maxRetries = 3;
-    con.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE,
-        2, StorageUnit.KB);
     con.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES,
         maxRetries);
     MultiNodePipelineBlockAllocator blkAllocator =
@@ -1235,4 +1228,18 @@ public class TestOzoneECClient {
     GenericTestUtils.waitFor(() -> ecOut.getFlushCheckpoint() == checkpoint,
         100, 10000);
   }
+
+  private static OzoneConfiguration createConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    ReplicationConfigValidator validator =
+        conf.getObject(ReplicationConfigValidator.class);
+    validator.disableValidation();
+    conf.setFromObject(validator);
+
+    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 2,
+        StorageUnit.KB);
+
+    return conf;
+  }
 }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 1051ac87cb..d6cd274a07 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -337,7 +337,7 @@ public class TestRootedOzoneFileSystem {
     // write some test data into bucket
     try (OzoneOutputStream outputStream = objectStore.getVolume(volumeName).
             getBucket(bucketName).createKey(key, 1,
-                    new ECReplicationConfig("RS-3-2-1024"),
+                    new ECReplicationConfig("RS-3-2-1024k"),
                     new HashMap<>())) {
       outputStream.write(RandomUtils.nextBytes(1));
     }
@@ -2058,7 +2058,7 @@ public class TestRootedOzoneFileSystem {
     builder.setBucketLayout(BucketLayout.LEGACY);
     builder.setDefaultReplicationConfig(
         new DefaultReplicationConfig(
-            new ECReplicationConfig("RS-3-2-1024")));
+            new ECReplicationConfig("RS-3-2-1024k")));
     BucketArgs omBucketArgs = builder.build();
     String vol = UUID.randomUUID().toString();
     String buck = UUID.randomUUID().toString();
@@ -2117,7 +2117,7 @@ public class TestRootedOzoneFileSystem {
     // write some test data into bucket
     try (OzoneOutputStream outputStream = objectStore.getVolume(volumeName).
             getBucket(bucketName).createKey(key, 1,
-                    new ECReplicationConfig("RS-3-2-1024"),
+                    new ECReplicationConfig("RS-3-2-1024k"),
                     new HashMap<>())) {
       outputStream.write(RandomUtils.nextBytes(1));
     }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index e89f10396f..9f96448f3b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -125,7 +125,7 @@ public class TestContainerCommandsEC {
   private static final int EC_DATA = 3;
   private static final int EC_PARITY = 2;
   private static final EcCodec EC_CODEC = EcCodec.RS;
-  private static final int EC_CHUNK_SIZE = 1024;
+  private static final int EC_CHUNK_SIZE = 1024 * 1024;
   private static final int STRIPE_DATA_SIZE = EC_DATA * EC_CHUNK_SIZE;
   private static final int NUM_DN = EC_DATA + EC_PARITY + 3;
   private static byte[][] inputChunks = new byte[EC_DATA][EC_CHUNK_SIZE];
@@ -533,7 +533,8 @@ public class TestContainerCommandsEC {
       inputChunks[i] = getBytesWith(i + 1, EC_CHUNK_SIZE);
     }
     try (OzoneOutputStream out = bucket.createKey(keyString, 4096,
-        new ECReplicationConfig(3, 2, EcCodec.RS, 1024), new HashMap<>())) {
+        new ECReplicationConfig(3, 2, EcCodec.RS, EC_CHUNK_SIZE),
+        new HashMap<>())) {
       Assert.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
       for (int i = 0; i < numChunks; i++) {
         out.write(inputChunks[i]);
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
index 504e53b3ad..a429b94d5a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java
@@ -87,7 +87,7 @@ public class TestECKeyOutputStream {
    */
   @BeforeClass
   public static void init() throws Exception {
-    chunkSize = 1024;
+    chunkSize = 1024 * 1024;
     flushSize = 2 * chunkSize;
     maxFlushSize = 2 * flushSize;
     blockSize = 2 * maxFlushSize;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index 73d9252e57..b50f2ac8d6 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -97,7 +97,7 @@ public class TestECContainerRecovery {
    */
   @BeforeAll
   public static void init() throws Exception {
-    chunkSize = 1024;
+    chunkSize = 1024 * 1024;
     flushSize = 2 * chunkSize;
     maxFlushSize = 2 * flushSize;
     blockSize = 2 * maxFlushSize;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index c761e4a8bf..39775fb617 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -120,7 +120,8 @@ public class TestBlockDeletion {
     GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
     GenericTestUtils.setLogLevel(LegacyReplicationManager.LOG, Level.DEBUG);
 
-    conf.set("ozone.replication.allowed-configs", "^(RATIS/THREE)|(EC/2-1)$");
+    conf.set("ozone.replication.allowed-configs",
+        "^(RATIS/THREE)|(EC/2-1-256k)$");
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     DatanodeConfiguration datanodeConfiguration = conf.getObject(


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to