pvary commented on code in PR #15633:
URL: https://github.com/apache/iceberg/pull/15633#discussion_r2959135392


##########
data/src/test/java/org/apache/iceberg/data/BaseFormatModelTests.java:
##########
@@ -317,7 +321,316 @@ void 
testPositionDeleteWriterEngineWriteGenericRead(FileFormat fileFormat) throw
     DataTestHelpers.assertEquals(positionDeleteSchema.asStruct(), records, 
readRecords);
   }
 
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  /* Write with Generic Record, read with projected engine type T (narrow schema) */
+  void testReaderBuilderProjection(FileFormat fileFormat) throws IOException {
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema fullSchema = dataGenerator.schema();
+
+    List<Types.NestedField> columns = fullSchema.columns();
+    List<Types.NestedField> projectedColumns =
+        IntStream.range(0, columns.size())
+            .filter(i -> i % 2 == 1)
+            .mapToObj(columns::get)
+            .collect(Collectors.toList());
+    if (projectedColumns.isEmpty()) {
+      projectedColumns = ImmutableList.of(columns.get(columns.size() - 1));
+    }
+
+    Schema projectedSchema = new Schema(projectedColumns);
+
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    writeGenericRecords(fileFormat, fullSchema, genericRecords);
+
+    List<Record> projectedGenericRecords = projectRecords(genericRecords, 
projectedSchema);
+    List<T> expectedEngineRecords =
+        convertToEngineRecords(projectedGenericRecords, projectedSchema);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+    List<T> readRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(projectedSchema)
+            .engineProjection(engineSchema(projectedSchema))
+            .build()) {
+      readRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertEquals(projectedSchema, expectedEngineRecords, readRecords);
+  }
+
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  void testReaderBuilderFilter(FileFormat fileFormat) throws IOException {
+
+    assumeSupports(fileFormat, "filter");
+
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema schema = dataGenerator.schema();
+
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    writeGenericRecords(fileFormat, schema, genericRecords);
+
+    // Construct a filter condition whose value is smaller than the column's minimum
+    // value, so that file-level filtering prunes the entire file.
+    Types.NestedField firstField = schema.columns().get(0);
+    Expression filter = filterFieldExpression(firstField, schema, 
genericRecords);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+    List<T> readRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .filter(filter)
+            .build()) {
+      readRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertThat(readRecords).isEmpty();
+  }
+
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  /*
+   * Write with Generic Record, then read using an upper-cased column name in 
the filter to verify
+   * caseSensitive behavior.
+   */
+  void testReaderBuilderCaseSensitive(FileFormat fileFormat) throws 
IOException {
+
+    assumeSupports(fileFormat, "caseSensitive");
+
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema schema = dataGenerator.schema();
+
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    writeGenericRecords(fileFormat, schema, genericRecords);
+
+    // Build a filter using the upper-cased name of the first column.
+    Types.NestedField firstField = schema.columns().get(0);
+    Object filterValue = genericRecords.get(0).getField(firstField.name());
+    Expression upperCaseFilter = 
Expressions.equal(firstField.name().toUpperCase(), filterValue);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+
+    // caseSensitive=false: upper-cased column name must be resolved correctly.
+    List<T> readRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .filter(upperCaseFilter)
+            .caseSensitive(false)
+            .build()) {
+      readRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertThat(readRecords).isNotEmpty();
+
+    // caseSensitive=true: upper-cased column name cannot be resolved → must 
throw.
+    assertThatThrownBy(
+            () -> {
+              try (CloseableIterable<T> reader =
+                  FormatModelRegistry.readBuilder(fileFormat, engineType(), 
inputFile)
+                      .project(schema)
+                      .engineProjection(engineSchema(schema))
+                      .filter(upperCaseFilter)
+                      .caseSensitive(true)
+                      .build()) {
+                ImmutableList.copyOf(reader);
+              }
+            })
+        .isInstanceOf(ValidationException.class)
+        .hasMessageContaining("Cannot find field '%s'", 
firstField.name().toUpperCase());
+  }
+
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  /*
+   * Write with Generic Record, then read using split to verify that the split 
range is respected.
+   * Reading with a zero-length split at the end of the file should return no 
records, while reading
+   * with the full file range should return all records.
+   */
+  void testReaderBuilderSplit(FileFormat fileFormat) throws IOException {
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema schema = dataGenerator.schema();
+
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    writeGenericRecords(fileFormat, schema, genericRecords);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+    long fileLength = inputFile.getLength();
+
+    // split(fileLength, 0): empty range at the end of the file → no records 
should be returned
+    List<T> emptyReadRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .split(fileLength, 0)
+            .build()) {
+      emptyReadRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertThat(emptyReadRecords).isEmpty();
+
+    // split(0, fileLength): full file range → all records should be returned
+    List<T> fullReadRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .split(0, fileLength)
+            .build()) {
+      fullReadRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertEquals(schema, convertToEngineRecords(genericRecords, schema), 
fullReadRecords);
+  }
+
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  /*
+   * Verifies the contract of recordsPerBatch: recordsPerBatch is a hint for 
vectorized readers. The
+   * total number of records returned must be unaffected regardless of the 
batch size value.
+   */
+  void testReaderBuilderRecordsPerBatch(FileFormat fileFormat) throws 
IOException {
+
+    assumeSupports(fileFormat, "recordsPerBatch");
+
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema schema = dataGenerator.schema();
+
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    writeGenericRecords(fileFormat, schema, genericRecords);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+    List<T> expectedEngineRecords = convertToEngineRecords(genericRecords, 
schema);
+
+    List<T> smallBatchRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .recordsPerBatch(1)
+            .build()) {
+      smallBatchRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertEquals(schema, expectedEngineRecords, smallBatchRecords);
+
+    List<T> largeBatchRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .recordsPerBatch(genericRecords.size() + 1)
+            .build()) {
+      largeBatchRecords = ImmutableList.copyOf(reader);
+    }
+
+    assertEquals(schema, expectedEngineRecords, largeBatchRecords);
+  }
+
+  @ParameterizedTest
+  @FieldSource("FILE_FORMATS")
+  /* Verifies the contract of reuseContainers */
+  void testReaderBuilderReuseContainers(FileFormat fileFormat) throws 
IOException {
+
+    assumeSupports(fileFormat, "reuseContainers");
+
+    DataGenerator dataGenerator = new DataGenerators.DefaultSchema();
+    Schema schema = dataGenerator.schema();
+    List<Record> genericRecords = dataGenerator.generateRecords();
+    // Need at least 2 records to verify container reuse
+    assumeThat(genericRecords.size() >= 2).isTrue();
+    writeGenericRecords(fileFormat, schema, genericRecords);
+
+    InputFile inputFile = encryptedFile.encryptingOutputFile().toInputFile();
+
+    // Without reuseContainers: every record must be a distinct object instance
+    List<T> noReuseRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .build()) {
+      noReuseRecords = ImmutableList.copyOf(reader);
+    }
+
+    for (int i = 0; i < noReuseRecords.size() - 1; i++) {
+      assertThat(noReuseRecords.get(i)).isNotSameAs(noReuseRecords.get(i + 1));
+    }
+
+    // With reuseContainers: all collected elements must be the same object 
instance
+    List<T> reuseRecords;
+    try (CloseableIterable<T> reader =
+        FormatModelRegistry.readBuilder(fileFormat, engineType(), inputFile)
+            .project(schema)
+            .engineProjection(engineSchema(schema))
+            .reuseContainers()
+            .build()) {
+      reuseRecords = ImmutableList.copyOf(reader);
+    }
+
+    T first = reuseRecords.get(0);
+    for (int i = 1; i < reuseRecords.size(); i++) {
+      assertThat(reuseRecords.get(i)).isSameAs(first);
+    }
+  }
+
+  private void writeGenericRecords(FileFormat fileFormat, Schema schema, 
List<Record> records)
+      throws IOException {
+    FileWriterBuilder<DataWriter<Record>, Object> writerBuilder =
+        FormatModelRegistry.dataWriteBuilder(fileFormat, Record.class, 
encryptedFile);
+
+    DataWriter<Record> writer =
+        
writerBuilder.schema(schema).spec(PartitionSpec.unpartitioned()).build();
+
+    try (writer) {
+      for (Record record : records) {
+        writer.write(record);
+      }
+    }
+
+    DataFile dataFile = writer.toDataFile();
+    assertThat(dataFile).isNotNull();
+    assertThat(dataFile.recordCount()).isEqualTo(records.size());
+    assertThat(dataFile.format()).isEqualTo(fileFormat);
+  }
+
+  private List<Record> projectRecords(List<Record> records, Schema 
projectedSchema) {
+    return records.stream()
+        .map(
+            record -> {
+              Record projected = 
GenericRecord.create(projectedSchema.asStruct());
+              for (Types.NestedField field : projectedSchema.columns()) {
+                projected.setField(field.name(), 
record.getField(field.name()));
+              }
+              return projected;
+            })
+        .collect(Collectors.toList());

Review Comment:
   ```suggestion
       return records.stream()
           .map(
               record -> {
                 Record projected = 
GenericRecord.create(projectedSchema.asStruct());
                 projectedSchema.columns().forEach(
                     field -> projected.setField(field.name(), 
record.getField(field.name())));
                 return projected;
               })
           .toList();
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to