This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 56aded81287 [HUDI-7599] add bootstrap mor legacy reader back to default source (#10990)
56aded81287 is described below

commit 56aded81287f295cfee692f16be0adc6f175902e
Author: Jon Vexler <jbvex...@gmail.com>
AuthorDate: Fri Apr 12 14:31:10 2024 -0400

    [HUDI-7599] add bootstrap mor legacy reader back to default source (#10990)
    
    Co-authored-by: Jonathan Vexler <=>
---
 .../src/main/scala/org/apache/hudi/DefaultSource.scala       | 12 ++++++++++--
 .../hudi/functional/TestNewHoodieParquetFileFormat.java      | 12 +++---------
 2 files changed, 13 insertions(+), 11 deletions(-)
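
For context, a minimal sketch of the read path this commit changes: a snapshot
query on a bootstrapped MERGE_ON_READ table through the Hudi Spark datasource.
This is illustrative only; basePath is a hypothetical location, and an active
SparkSession named spark is assumed.

    // Snapshot read of a bootstrapped MOR table (basePath is hypothetical).
    // After this commit, this query is served by the legacy
    // HoodieBootstrapMORRelation unless the new parquet file format reader
    // is explicitly enabled.
    val basePath = "/tmp/hudi/bootstrapped_mor_table"
    val df = spark.read
      .format("hudi")
      .option("hoodie.datasource.query.type", "snapshot") // snapshot is also the default
      .load(basePath)
    df.show()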

diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DefaultSource.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DefaultSource.scala
index be3d2f4ed4b..8efa8e28867 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DefaultSource.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DefaultSource.scala
@@ -299,14 +299,22 @@ object DefaultSource {
              new IncrementalRelation(sqlContext, parameters, userSchema, metaClient)
             }
 
-          case (MERGE_ON_READ, QUERY_TYPE_SNAPSHOT_OPT_VAL, _) =>
+          case (MERGE_ON_READ, QUERY_TYPE_SNAPSHOT_OPT_VAL, false) =>
             if (useNewParquetFileFormat) {
               new HoodieMergeOnReadSnapshotHadoopFsRelationFactory(
-                sqlContext, metaClient, parameters, userSchema, isBootstrappedTable).build()
+                sqlContext, metaClient, parameters, userSchema, isBootstrap = false).build()
             } else {
              new MergeOnReadSnapshotRelation(sqlContext, parameters, metaClient, globPaths, userSchema)
             }
 
+          case (MERGE_ON_READ, QUERY_TYPE_SNAPSHOT_OPT_VAL, true) =>
+            if (useNewParquetFileFormat) {
+              new HoodieMergeOnReadSnapshotHadoopFsRelationFactory(
+                sqlContext, metaClient, parameters, userSchema, isBootstrap = true).build()
+            } else {
+              HoodieBootstrapMORRelation(sqlContext, userSchema, globPaths, metaClient, parameters)
+            }
+
           case (MERGE_ON_READ, QUERY_TYPE_INCREMENTAL_OPT_VAL, _) =>
             if (useNewParquetFileFormat) {
               new HoodieMergeOnReadIncrementalHadoopFsRelationFactory(
diff --git a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestNewHoodieParquetFileFormat.java b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestNewHoodieParquetFileFormat.java
index ce462c93d1b..be2b6ff949e 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestNewHoodieParquetFileFormat.java
+++ b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestNewHoodieParquetFileFormat.java
@@ -24,7 +24,6 @@ import org.apache.hudi.common.model.HoodieTableType;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SaveMode;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
@@ -39,18 +38,13 @@ import static org.apache.hudi.common.model.HoodieTableType.MERGE_ON_READ;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 @Tag("functional")
-@Disabled("HUDI-6756")
 public class TestNewHoodieParquetFileFormat extends TestBootstrapReadBase {
 
   private static Stream<Arguments> testArgs() {
     Stream.Builder<Arguments> b = Stream.builder();
-    HoodieTableType[] tableType = {COPY_ON_WRITE, MERGE_ON_READ};
-    Integer[] nPartitions = {0, 1, 2};
-    for (HoodieTableType tt : tableType) {
-      for (Integer n : nPartitions) {
-        b.add(Arguments.of(tt, n));
-      }
-    }
+    b.add(Arguments.of(MERGE_ON_READ, 0));
+    b.add(Arguments.of(COPY_ON_WRITE, 1));
+    b.add(Arguments.of(MERGE_ON_READ, 2));
     return b.build();
   }
 

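The trimmed test matrix above exercises both reader paths for bootstrapped
tables. A minimal sketch of toggling between them follows; the option key is
my reading of DataSourceReadOptions at this commit, so treat the exact key as
an assumption on other versions, and basePath is again hypothetical.

    // Legacy reader (the default restored by this commit):
    // snapshot reads resolve to HoodieBootstrapMORRelation.
    val legacyDf = spark.read.format("hudi")
      .load(basePath)

    // New parquet file format reader: snapshot reads resolve to
    // HoodieMergeOnReadSnapshotHadoopFsRelationFactory with isBootstrap = true.
    // Option key assumed from DataSourceReadOptions.
    val newFormatDf = spark.read.format("hudi")
      .option("hoodie.datasource.read.use.new.parquet.file.format", "true")
      .load(basePath)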