pvary commented on code in PR #14435:
URL: https://github.com/apache/iceberg/pull/14435#discussion_r2675640982
########## spark/v4.0/spark/src/test/java/org/apache/iceberg/spark/actions/TestSparkParquetFileMergeRunner.java: ##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.actions;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.util.Collections;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.actions.RewriteFileGroup;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.TestBase;
+import org.apache.iceberg.types.Types;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+public class TestSparkParquetFileMergeRunner extends TestBase {
+
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA =
+      new Schema(
+          optional(1, "c1", Types.IntegerType.get()),
+          optional(2, "c2", Types.StringType.get()),
+          optional(3, "c3", Types.StringType.get()));
+
+  @TempDir private File tableDir;
+  private String tableLocation;
+
+  @BeforeEach
+  public void setupTableLocation() {
+    this.tableLocation = tableDir.toURI().toString();
+  }
+
+  @Test
+  public void testCanMergeAndGetSchemaReturnsFalseForSortedTable() {
+    // Create a table with a sort order
+    Table table = TABLES.create(SCHEMA, tableLocation);
+    table.updateProperties().set("write.metadata.metrics.default", "full").commit();
+    table
+        .replaceSortOrder()
+        .asc("c1") // Sort by column c1 in ascending order
+        .commit();
+
+    // Verify the table has a sort order
+    assertThat(table.sortOrder().isSorted()).isTrue();
+
+    // Create a mock RewriteFileGroup with Parquet files but no deletes
+    RewriteFileGroup group = mock(RewriteFileGroup.class);
+    DataFile parquetFile1 = mock(DataFile.class);
+
+    when(parquetFile1.format()).thenReturn(FileFormat.PARQUET);
+    when(parquetFile1.specId()).thenReturn(0);
+    when(parquetFile1.fileSizeInBytes()).thenReturn(100L);
+    when(parquetFile1.path()).thenReturn(tableLocation + "/data/file1.parquet");
+    when(group.rewrittenFiles()).thenReturn(Sets.newHashSet(parquetFile1));
+    when(group.expectedOutputFiles()).thenReturn(1);
+    when(group.maxOutputFileSize()).thenReturn(Long.MAX_VALUE);
+    when(group.fileScanTasks()).thenReturn(Collections.emptyList());
+
+    // Create runner and test canMergeAndGetSchema
+    SparkParquetFileMergeRunner runner = new SparkParquetFileMergeRunner(spark, table);
+
+    // Should return null because table has sort order
+    assertThat(runner.canMergeAndGetSchema(group)).isNull();
+  }
+
+  @Test
+  public void testCanMergeAndGetSchemaReturnsFalseForFilesWithDeleteFiles() {
+    // Create an unsorted table
+    Table table = TABLES.create(SCHEMA, tableLocation);
+
+    // Verify the table has no sort order
+    assertThat(table.sortOrder().isUnsorted()).isTrue();
+
+    // Create a mock RewriteFileGroup with Parquet files that have delete files
+    RewriteFileGroup group = mock(RewriteFileGroup.class);
+    FileScanTask task1 = mock(FileScanTask.class);
+    DeleteFile deleteFile = mock(DeleteFile.class);
+    DataFile parquetFile1 = mock(DataFile.class);
+
+    when(task1.deletes()).thenReturn(Lists.newArrayList(deleteFile)); // Has delete files
+    when(group.fileScanTasks()).thenReturn(Lists.newArrayList(task1));
+    when(group.expectedOutputFiles()).thenReturn(1);
+    when(parquetFile1.format()).thenReturn(FileFormat.PARQUET);
+    when(group.rewrittenFiles()).thenReturn(Sets.newHashSet(parquetFile1));
+
+    // Create runner and test canMergeAndGetSchema
+    SparkParquetFileMergeRunner runner = new SparkParquetFileMergeRunner(spark, table);
+
+    // Should return null because files have delete files
+    assertThat(runner.canMergeAndGetSchema(group)).isNull();
+  }
+
+  @Test
+  public void testCanMergeAndGetSchemaPassesInitialChecksForValidGroup() {
+    // Create an unsorted table
+    Table table = TABLES.create(SCHEMA, tableLocation);
+
+    // Verify the table has no sort order
+    assertThat(table.sortOrder().isUnsorted()).isTrue();
+
+    // Create a mock RewriteFileGroup with Parquet files and no deletes
+    RewriteFileGroup group = mock(RewriteFileGroup.class);
+    DataFile parquetFile1 = mock(DataFile.class);
+    DataFile parquetFile2 = mock(DataFile.class);
+    FileScanTask task1 = mock(FileScanTask.class);
+    FileScanTask task2 = mock(FileScanTask.class);
+
+    when(parquetFile1.format()).thenReturn(FileFormat.PARQUET);
+    when(parquetFile1.specId()).thenReturn(0);
+    when(parquetFile1.fileSizeInBytes()).thenReturn(100L);
+    when(parquetFile1.path()).thenReturn(tableLocation + "/data/file1.parquet");
+    when(parquetFile2.format()).thenReturn(FileFormat.PARQUET);
+    when(parquetFile2.specId()).thenReturn(0);
+    when(parquetFile2.fileSizeInBytes()).thenReturn(200L);
+    when(parquetFile2.path()).thenReturn(tableLocation + "/data/file2.parquet");
+    when(task1.deletes()).thenReturn(Collections.emptyList());
+    when(task2.deletes()).thenReturn(Collections.emptyList());
+    when(group.rewrittenFiles()).thenReturn(Sets.newHashSet(parquetFile1, parquetFile2));
+    when(group.fileScanTasks()).thenReturn(Lists.newArrayList(task1, task2));
+    when(group.expectedOutputFiles()).thenReturn(1);
+    when(group.maxOutputFileSize()).thenReturn(Long.MAX_VALUE);
+
+    // Verify the initial checks pass (expectedOutputFiles, sortOrder, deletes)
+    assertThat(group.expectedOutputFiles()).isEqualTo(1);
+    assertThat(table.sortOrder().isUnsorted()).isTrue();
+    boolean hasDeletes = group.fileScanTasks().stream().anyMatch(task -> !task.deletes().isEmpty());
+    assertThat(hasDeletes).isFalse();
+
+    // Note: ParquetFileMerger.canMergeAndGetSchema would return null because
+    // mock files don't exist on disk, but the initial validation checks all pass

Review Comment:
   Then we need another check, or we should create the files in the tests
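
   For the second option, the test could append real rows through Spark and plan genuine `FileScanTask`s instead of stubbing `DataFile.path()`, so that `ParquetFileMerger.canMergeAndGetSchema` has actual Parquet footers to open. A minimal sketch of what that might look like, assuming the `ThreeColumnRecord` test bean used by the neighboring Spark action tests, extra imports (`java.util.List`, `java.util.stream.Collectors`, `org.apache.spark.sql.Dataset`, `org.apache.spark.sql.Row`), and that no further `RewriteFileGroup` stubbing is required; this is not code from the PR:

   ```java
   @Test
   public void testCanMergeAndGetSchemaForRealFiles() {
     // Unsorted table, so the sort-order check passes
     Table table = TABLES.create(SCHEMA, tableLocation);

     // Append twice so the table holds two real Parquet data files on disk
     // (ThreeColumnRecord is assumed to be the c1/c2/c3 bean from the action tests)
     List<ThreeColumnRecord> records =
         Lists.newArrayList(new ThreeColumnRecord(1, "AAAA", "CCCC"));
     Dataset<Row> df = spark.createDataFrame(records, ThreeColumnRecord.class);
     df.select("c1", "c2", "c3").write().format("iceberg").mode("append").save(tableLocation);
     df.select("c1", "c2", "c3").write().format("iceberg").mode("append").save(tableLocation);
     table.refresh();

     // Plan real scan tasks and hand them to the (still mocked) group
     List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
     RewriteFileGroup group = mock(RewriteFileGroup.class);
     when(group.fileScanTasks()).thenReturn(tasks);
     when(group.rewrittenFiles())
         .thenReturn(tasks.stream().map(FileScanTask::file).collect(Collectors.toSet()));
     when(group.expectedOutputFiles()).thenReturn(1);
     when(group.maxOutputFileSize()).thenReturn(Long.MAX_VALUE);

     SparkParquetFileMergeRunner runner = new SparkParquetFileMergeRunner(spark, table);

     // With readable footers the schema check is expected to succeed
     // (assumption about the PR's intended semantics for a clean group)
     assertThat(runner.canMergeAndGetSchema(group)).isNotNull();
   }
   ```

   Writing through Spark keeps the files' footers consistent with the table schema, which is what the footer-level compatibility check needs; mocked paths can only ever exercise the metadata-level checks.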
