gaborkaszab commented on code in PR #14998:
URL: https://github.com/apache/iceberg/pull/14998#discussion_r2754360102
##########
core/src/test/java/org/apache/iceberg/PartitionStatsHandlerTestBase.java:
##########
@@ -661,105 +450,110 @@ public void testFullComputeFallbackWithInvalidStats()
throws Exception {
assertThat(partitionStats.get(0).dataFileCount()).isEqualTo(2);
}
- /**
- * @deprecated will be removed in 1.12.0
- */
@Test
- @Deprecated
- public void testV2toV3SchemaEvolution() throws Exception {
- Table testTable =
- TestTables.create(
- tempDir("schema_evolution"), "schema_evolution", SCHEMA, SPEC, 2,
fileFormatProperty);
+ public void testAppendWithAllValues() {
+ BasePartitionStatistics stats1 =
+ createStats(100L, 15, 1000L, 2L, 500, 1L, 200, 15L, 1625077800000L,
12345L);
+ BasePartitionStatistics stats2 =
+ createStats(200L, 7, 500L, 1L, 100, 0L, 50, 7L, 1625077900000L,
12346L);
- // write stats file using v2 schema
- DataFile dataFile =
- FileGenerationUtil.generateDataFile(testTable,
TestHelpers.Row.of("foo", "A"));
- testTable.newAppend().appendFile(dataFile).commit();
- PartitionStatisticsFile statisticsFile =
- PartitionStatsHandler.computeAndWriteStatsFile(
- testTable, testTable.currentSnapshot().snapshotId());
+ PartitionStatsHandler.appendStats(stats1, stats2);
- Types.StructType partitionSchema = Partitioning.partitionType(testTable);
+ validateStats(stats1, 300L, 22, 1500L, 3L, 600, 1L, 250, 22L,
1625077900000L, 12346L);
+ }
- // read with v2 schema
- Schema v2Schema = PartitionStatistics.schema(partitionSchema, 2);
- List<PartitionStats> partitionStatsV2;
- try (CloseableIterable<PartitionStats> recordIterator =
- PartitionStatsHandler.readPartitionStatsFile(
- v2Schema, testTable.io().newInputFile(statisticsFile.path()))) {
- partitionStatsV2 = Lists.newArrayList(recordIterator);
- }
+ @Test
+ public void testAppendWithThisNullOptionalField() {
+ BasePartitionStatistics stats1 =
+ createStats(100L, 15, 1000L, 2L, 500, 1L, 200, null, null, null);
+ BasePartitionStatistics stats2 =
+ createStats(100L, 7, 500L, 1L, 100, 0L, 50, 7L, 1625077900000L,
12346L);
- // read with v3 schema
- Schema v3Schema = PartitionStatistics.schema(partitionSchema, 3);
- List<PartitionStats> partitionStatsV3;
- try (CloseableIterable<PartitionStats> recordIterator =
- PartitionStatsHandler.readPartitionStatsFile(
- v3Schema, testTable.io().newInputFile(statisticsFile.path()))) {
- partitionStatsV3 = Lists.newArrayList(recordIterator);
- }
+ PartitionStatsHandler.appendStats(stats1, stats2);
- assertThat(partitionStatsV2).hasSameSizeAs(partitionStatsV3);
- Comparator<StructLike> comparator = Comparators.forType(partitionSchema);
- for (int i = 0; i < partitionStatsV2.size(); i++) {
- assertThat(isEqual(comparator, partitionStatsV2.get(i),
partitionStatsV3.get(i))).isTrue();
- }
+ validateStats(stats1, 200L, 22, 1500L, 3L, 600, 1L, 250, 7L,
1625077900000L, 12346L);
}
- private static void computeAndValidatePartitionStats(
- Table testTable, Schema recordSchema, Tuple... expectedValues) throws
IOException {
- // compute and commit partition stats file
- Snapshot currentSnapshot = testTable.currentSnapshot();
- PartitionStatisticsFile result =
PartitionStatsHandler.computeAndWriteStatsFile(testTable);
-
testTable.updatePartitionStatistics().setPartitionStatistics(result).commit();
- assertThat(result.snapshotId()).isEqualTo(currentSnapshot.snapshotId());
+ @Test
+ public void testAppendWithBothNullOptionalFields() {
Review Comment:
   These tests come from the removed PartitionStats tests and are kept here to preserve coverage.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]