vinothchandar commented on code in PR #10492:
URL: https://github.com/apache/hudi/pull/10492#discussion_r1454069459


##########
hudi-utilities/src/test/java/org/apache/hudi/utilities/deltastreamer/TestHoodieDeltaStreamerSchemaEvolutionQuick.java:
##########
@@ -59,25 +59,34 @@ public void teardown() throws Exception {
   }
 
   protected static Stream<Arguments> testArgs() {
+    boolean fullTest = false;
     Stream.Builder<Arguments> b = Stream.builder();
-    //only testing row-writer enabled for now
-    for (Boolean rowWriterEnable : new Boolean[] {true}) {
-      for (Boolean nullForDeletedCols : new Boolean[] {false, true}) {
-        for (Boolean useKafkaSource : new Boolean[] {false, true}) {
-          for (Boolean addFilegroups : new Boolean[] {false, true}) {
-            for (Boolean multiLogFiles : new Boolean[] {false, true}) {
-              for (Boolean shouldCluster : new Boolean[] {false, true}) {
-                for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
-                  if (!multiLogFiles || tableType.equals("MERGE_ON_READ")) {
-                    b.add(Arguments.of(tableType, shouldCluster, false, rowWriterEnable, addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols));
+    if (fullTest) {
+      //only testing row-writer enabled for now
+      for (Boolean rowWriterEnable : new Boolean[] {true}) {
+        for (Boolean nullForDeletedCols : new Boolean[] {false, true}) {
+          for (Boolean useKafkaSource : new Boolean[] {false, true}) {
+            for (Boolean addFilegroups : new Boolean[] {false, true}) {
+              for (Boolean multiLogFiles : new Boolean[] {false, true}) {
+                for (Boolean shouldCluster : new Boolean[] {false, true}) {
+                  for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
+                    if (!multiLogFiles || tableType.equals("MERGE_ON_READ")) {
+                      b.add(Arguments.of(tableType, shouldCluster, false, rowWriterEnable, addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols));
+                    }
                   }
                 }
+                b.add(Arguments.of("MERGE_ON_READ", false, true, rowWriterEnable, addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols));
               }
-              b.add(Arguments.of("MERGE_ON_READ", false, true, rowWriterEnable, addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols));
             }
           }
         }
       }
+    } else {

Review Comment:
   ```
   String tableType = COW, MOR
   Boolean shouldCluster = true
   Boolean shouldCompact = true
   Boolean rowWriterEnable = true
   Boolean addFilegroups = true
   Boolean multiLogFiles = true
   Boolean useKafkaSource = false, true
   Boolean allowNullForDeletedCols = false, true
   ```
                             
   I wonder if we could just do something like this. With new file groups and multiple log files, alongside clustering and compaction, that should be the more complex (superset) scenario, no?
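
   For illustration, a minimal sketch of what that superset-only provider could look like. This is hypothetical and not part of this PR; the argument order is assumed to match the existing Arguments.of(tableType, shouldCluster, shouldCompact, rowWriterEnable, addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols) calls in testArgs():

   ```java
   import java.util.stream.Stream;

   import org.junit.jupiter.params.provider.Arguments;

   // Hypothetical sketch: run only the highest-complexity combination and vary just
   // the source type and the null-for-deleted-cols handling.
   protected static Stream<Arguments> testArgs() {
     Stream.Builder<Arguments> b = Stream.builder();
     for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
       for (Boolean useKafkaSource : new Boolean[] {false, true}) {
         for (Boolean nullForDeletedCols : new Boolean[] {false, true}) {
           // Assumed order: (tableType, shouldCluster, shouldCompact, rowWriterEnable,
           //                 addFilegroups, multiLogFiles, useKafkaSource, nullForDeletedCols).
           // Note: compaction and multiple log files are presumably only meaningful for
           // MERGE_ON_READ, so the COPY_ON_WRITE row may need those flags dropped.
           b.add(Arguments.of(tableType, true, true, true, true, true,
               useKafkaSource, nullForDeletedCols));
         }
       }
     }
     return b.build();
   }
   ```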
                             
                             



##########
hudi-utilities/src/test/java/org/apache/hudi/utilities/deltastreamer/TestHoodieDeltaStreamerSchemaEvolutionQuick.java:
##########
@@ -97,19 +106,27 @@ protected static Stream<Arguments> testReorderedColumn() {
   }
 
   protected static Stream<Arguments> testParamsWithSchemaTransformer() {
+    boolean fullTest = false;
     Stream.Builder<Arguments> b = Stream.builder();
-    for (Boolean useTransformer : new Boolean[] {false, true}) {
-      for (Boolean setSchema : new Boolean[] {false, true}) {
-        for (Boolean rowWriterEnable : new Boolean[] {true}) {
-          for (Boolean nullForDeletedCols : new Boolean[] {false, true}) {
-            for (Boolean useKafkaSource : new Boolean[] {false, true}) {
-              for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
-                b.add(Arguments.of(tableType, rowWriterEnable, useKafkaSource, nullForDeletedCols, useTransformer, setSchema));
+    if (fullTest) {
+      for (Boolean useTransformer : new Boolean[] {false, true}) {
+        for (Boolean setSchema : new Boolean[] {false, true}) {
+          for (Boolean rowWriterEnable : new Boolean[] {true}) {
+            for (Boolean nullForDeletedCols : new Boolean[] {false, true}) {
+              for (Boolean useKafkaSource : new Boolean[] {false, true}) {
+                for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
+                  b.add(Arguments.of(tableType, rowWriterEnable, useKafkaSource, nullForDeletedCols, useTransformer, setSchema));
+                }
               }
             }
           }
         }
       }
+    } else {
+      b.add(Arguments.of("COPY_ON_WRITE", true, true, true, true, true));

Review Comment:
   Similar to the above: could we just come up with the "superset"/higher-complexity combination here, or is that already it?
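
   For illustration, a minimal sketch of a superset-only provider here as well. Again hypothetical; the argument order is assumed to match the existing Arguments.of(tableType, rowWriterEnable, useKafkaSource, nullForDeletedCols, useTransformer, setSchema) calls:

   ```java
   import java.util.stream.Stream;

   import org.junit.jupiter.params.provider.Arguments;

   // Hypothetical sketch: only the highest-complexity combination per table type,
   // with row writer, Kafka source, null-for-deleted-cols, transformer and explicit
   // schema all enabled.
   protected static Stream<Arguments> testParamsWithSchemaTransformer() {
     Stream.Builder<Arguments> b = Stream.builder();
     for (String tableType : new String[] {"COPY_ON_WRITE", "MERGE_ON_READ"}) {
       b.add(Arguments.of(tableType, true, true, true, true, true));
     }
     return b.build();
   }
   ```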


