pvary commented on code in PR #14435:
URL: https://github.com/apache/iceberg/pull/14435#discussion_r2665201801


##########
spark/v4.0/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewriteDataFilesAction.java:
##########
@@ -2657,4 +2682,411 @@ public boolean matches(RewriteFileGroup argument) {
       return groupIDs.contains(argument.info().globalIndex());
     }
   }
+
+  @TestTemplate
+  public void testBinPackUsesCorrectRunnerBasedOnOption() {
+    Table table = createTable(4);
+    shouldHaveFiles(table, 4);
+
+    // Test that binPack() respects the configuration option
+    // When enabled, should use SparkParquetFileMergeRunner
+    RewriteDataFiles.Result resultWithMerger =
+        basicRewrite(table)
+            .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "true")
+            .binPack()
+            .execute();
+
+    assertThat(resultWithMerger.rewrittenDataFilesCount()).isEqualTo(4);
+    assertThat(resultWithMerger.addedDataFilesCount()).isGreaterThan(0);
+
+    // Write more data to the table so we can test again
+    writeRecords(100, SCALE);
+
+    // When disabled, should use SparkBinPackFileRewriteRunner
+    RewriteDataFiles.Result resultWithoutMerger =
+        basicRewrite(table)
+            .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "false")
+            .binPack()
+            .execute();
+
+    // Should rewrite the newly added files
+    assertThat(resultWithoutMerger.rewrittenDataFilesCount()).isGreaterThan(0);
+  }
+
+  @TestTemplate
+  public void testParquetFileMergerExplicitlyEnabledAndDisabled() {
+    Table table = createTable(4);
+    shouldHaveFiles(table, 4);
+
+    long countBefore = currentData().size();
+
+    // Test explicitly enabling ParquetFileMerger
+    RewriteDataFiles.Result resultEnabled =
+        basicRewrite(table)
+            .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "true")
+            .binPack()
+            .execute();
+
+    assertThat(resultEnabled.rewrittenDataFilesCount()).isEqualTo(4);
+    assertThat(resultEnabled.addedDataFilesCount()).isGreaterThan(0);
+    assertThat(currentData()).hasSize((int) countBefore);
+
+    // Write more data for second test
+    writeRecords(4, SCALE);
+
+    // Test explicitly disabling ParquetFileMerger
+    RewriteDataFiles.Result resultDisabled =
+        basicRewrite(table)
+            .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "false")
+            .binPack()
+            .execute();
+
+    assertThat(resultDisabled.rewrittenDataFilesCount()).isGreaterThan(0);
+    assertThat(resultDisabled.addedDataFilesCount()).isGreaterThan(0);
+  }
+
+  @TestTemplate
+  public void testParquetFileMergerProduceConsistentRowLineageWithBinPackMerger()
+      throws IOException {
+    // Test that both binpack and ParquetFileMerger convert virtual row IDs to physical
+    // and produce equivalent results for row lineage preservation
+    assumeThat(formatVersion).isGreaterThanOrEqualTo(3);
+
+    // Test binpack approach
+    Table binpackTable = createTable(4);
+    shouldHaveFiles(binpackTable, 4);
+    verifyInitialVirtualRowIds(binpackTable);
+    long binpackCountBefore = currentData().size();
+
+    RewriteDataFiles.Result binpackResult =

Review Comment:
   Shouldn't we split these tests based on `useParquetFileMerger`? Why do we cram `USE_PARQUET_ROW_GROUP_MERGE` `true` and `false` into the same test?
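   For illustration, a minimal sketch of the suggested split, reusing the helpers already visible in this test class (`createTable`, `shouldHaveFiles`, `basicRewrite`); the method names are hypothetical:

   ```java
   @TestTemplate
   public void testBinPackWithParquetRowGroupMergeEnabled() {
     Table table = createTable(4);
     shouldHaveFiles(table, 4);

     // Enabled: expected to go through the row-group merge path
     RewriteDataFiles.Result result =
         basicRewrite(table)
             .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "true")
             .binPack()
             .execute();

     assertThat(result.rewrittenDataFilesCount()).isEqualTo(4);
     assertThat(result.addedDataFilesCount()).isGreaterThan(0);
   }

   @TestTemplate
   public void testBinPackWithParquetRowGroupMergeDisabled() {
     Table table = createTable(4);
     shouldHaveFiles(table, 4);

     // Disabled: expected to fall back to the default bin-pack rewrite runner
     RewriteDataFiles.Result result =
         basicRewrite(table)
             .option(RewriteDataFiles.USE_PARQUET_ROW_GROUP_MERGE, "false")
             .binPack()
             .execute();

     assertThat(result.rewrittenDataFilesCount()).isEqualTo(4);
   }
   ```

   With each case owning a fresh table, the intermediate `writeRecords` call between the two rewrites would no longer be needed.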




