RussellSpitzer commented on code in PR #9731:
URL: https://github.com/apache/iceberg/pull/9731#discussion_r1494800722
##########
spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestRewriteManifestsAction.java:
##########
@@ -466,6 +472,164 @@ public void testRewriteLargeManifestsPartitionedTable() throws IOException {
assertThat(newManifests).hasSizeGreaterThanOrEqualTo(2);
}
+ @TestTemplate
+ public void testRewriteManifestsPartitionedTableWithInvalidSortColumnsThrowsException()
+ throws IOException {
+ PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("c1").bucket("c3", 10).build();
+ Map<String, String> options = Maps.newHashMap();
+ options.put(TableProperties.FORMAT_VERSION, String.valueOf(formatVersion));
+ options.put(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, snapshotIdInheritanceEnabled);
+ Table table = TABLES.create(SCHEMA, spec, options, tableLocation);
+
+ SparkActions actions = SparkActions.get();
+
+ // c2 is not a partition column, cannot use for sorting
+ List<String> badSortKeys1 = ImmutableList.of("c1", "c2");
+ assertThatThrownBy(
+ () ->
+ actions
+ .rewriteManifests(table)
+ .rewriteIf(manifest -> true)
+ .sort(badSortKeys1)
+ .execute())
+ .isInstanceOf(IllegalArgumentException.class)
+ .message()
+ .contains("Invalid sorting columns");
+
+ // c3_bucket is the hidden internal partition name, sort() expects the un-hidden partition
+ // column names
+ List<String> badSortKeys2 = ImmutableList.of("c1", "c3_bucket");
+ assertThatThrownBy(
+ () ->
+ actions
+ .rewriteManifests(table)
+ .rewriteIf(manifest -> true)
+ .sort(badSortKeys2)
+ .execute())
+ .isInstanceOf(IllegalArgumentException.class)
+ .message()
+ .contains("Invalid sorting columns");
+ }
+
+ @TestTemplate
+ public void testRewriteManifestsPartitionedTableWithCustomSorting() throws IOException {
+ Random random = new Random();
+
+ PartitionSpec spec =
+ PartitionSpec.builderFor(SCHEMA).identity("c1").truncate("c2", 3).bucket("c3", 10).build();
+ Map<String, String> options = Maps.newHashMap();
+ options.put(TableProperties.FORMAT_VERSION, String.valueOf(formatVersion));
+ options.put(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, snapshotIdInheritanceEnabled);
+ Table table = TABLES.create(SCHEMA, spec, options, tableLocation);
+
+ List<DataFile> dataFiles = Lists.newArrayList();
+ for (int fileOrdinal = 0; fileOrdinal < 1000; fileOrdinal++) {
+ dataFiles.add(
+ newDataFile(
+ table,
+ TestHelpers.Row.of(
+ new Object[] {
+ fileOrdinal, String.valueOf(random.nextInt() * 100), random.nextInt(10)
+ })));
+ }
+ ManifestFile appendManifest = writeManifest(table, dataFiles);
+ table.newFastAppend().appendManifest(appendManifest).commit();
+
+ List<ManifestFile> manifests = table.currentSnapshot().allManifests(table.io());
+ assertThat(manifests).as("Should have 1 manifest before rewrite").hasSize(1);
+
+ // Capture the c3 partition's lower and upper bounds - used for later test assertions
+ Integer c3PartitionMin =
+ Conversions.fromByteBuffer(
+ Types.IntegerType.get(), manifests.get(0).partitions().get(2).lowerBound());
+ Integer c3PartitionMax =
+ Conversions.fromByteBuffer(
+ Types.IntegerType.get(), manifests.get(0).partitions().get(2).upperBound());
+
+ // Set the target manifest size to a small value to force splitting records into multiple files
+ table
+ .updateProperties()
+ .set(
+ TableProperties.MANIFEST_TARGET_SIZE_BYTES,
+ String.valueOf(manifests.get(0).length() / 2))
+ .commit();
+
+ SparkActions actions = SparkActions.get();
+
+ String stagingLocation = java.nio.file.Files.createTempDirectory(temp, "junit").toString();
Review Comment:
   Any reason why we are using another directory here? We could just use our table location (or a subdirectory within it).
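
   As a possible illustration of that suggestion (not what the PR currently does), here is a minimal sketch that stages rewritten manifests under the table location instead of a separate temp directory. The `staging` sub-directory name is made up, the `actions`, `table`, and `tableLocation` variables are assumed from the test above, and it relies on the existing `RewriteManifests.stagingLocation(String)` setter:

   ```java
   // Hypothetical alternative: reuse the table location for staging instead of a
   // JUnit temp directory. "staging" is only an illustrative sub-directory name.
   String stagingLocation = tableLocation + "/staging";

   actions
       .rewriteManifests(table)
       .rewriteIf(manifest -> true)
       .stagingLocation(stagingLocation)
       .execute();
   ```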