This is an automated email from the ASF dual-hosted git repository.
szehon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/master by this push:
new 3def7f1d4f API, Core, Flink, Parquet, Spark: Use enhanced for loop (#6476)
3def7f1d4f is described below
commit 3def7f1d4f2db5878159f42403113262292a4528
Author: Vikash Kumar <[email protected]>
AuthorDate: Thu Jan 12 00:15:10 2023 +0530
API, Core, Flink, Parquet, Spark: Use enhanced for loop (#6476)
---
.../java/org/apache/iceberg/metrics/FixedReservoirHistogram.java | 6 +++---
api/src/main/java/org/apache/iceberg/types/JavaHashes.java | 7 +++----
.../java/org/apache/iceberg/util/ZOrderByteUtilsBenchmark.java | 8 ++++----
core/src/main/java/org/apache/iceberg/util/ParallelIterable.java | 3 +--
.../org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java | 4 ++--
.../flink/source/enumerator/IcebergEnumeratorStateSerializer.java | 4 ++--
.../org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java | 4 ++--
.../java/org/apache/iceberg/flink/source/reader/ReaderUtil.java | 4 ++--
.../org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java | 4 ++--
.../flink/source/enumerator/IcebergEnumeratorStateSerializer.java | 4 ++--
.../org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java | 4 ++--
.../java/org/apache/iceberg/flink/source/reader/ReaderUtil.java | 4 ++--
.../org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java | 4 ++--
.../flink/source/enumerator/IcebergEnumeratorStateSerializer.java | 4 ++--
.../org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java | 4 ++--
.../java/org/apache/iceberg/flink/source/reader/ReaderUtil.java | 4 ++--
.../main/java/org/apache/iceberg/parquet/ParquetValueReaders.java | 4 ++--
.../main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java | 4 ++--
.../org/apache/iceberg/spark/data/TestSparkParquetReader.java | 4 ++--
.../main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java | 4 ++--
.../org/apache/iceberg/spark/data/TestSparkParquetReader.java | 4 ++--
.../main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java | 4 ++--
.../java/org/apache/iceberg/spark/source/metrics/NumDeletes.java | 4 ++--
.../java/org/apache/iceberg/spark/source/metrics/NumSplits.java | 4 ++--
.../org/apache/iceberg/spark/data/TestSparkParquetReader.java | 4 ++--
.../main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java | 4 ++--
.../java/org/apache/iceberg/spark/source/metrics/NumDeletes.java | 4 ++--
.../java/org/apache/iceberg/spark/source/metrics/NumSplits.java | 4 ++--
.../org/apache/iceberg/spark/data/TestSparkParquetReader.java | 4 ++--
29 files changed, 61 insertions(+), 63 deletions(-)
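Every hunk below applies the same mechanical refactor: index-based loops are replaced with Java's enhanced for loop wherever the index was only used to read elements in order. A minimal before/after sketch of the pattern (the array and variable names here are illustrative, not taken from any file in this commit):

    // Before: the index i exists only to read values[i] in sequence.
    long[] values = {1L, 2L, 3L};
    long sum = 0;
    for (int i = 0; i < values.length; i++) {
      sum += values[i];
    }

    // After: the enhanced for loop visits the same elements without index bookkeeping.
    long total = 0;
    for (long value : values) {
      total += value;
    }

The rewrite is behavior-preserving whenever the loop body never needs the index itself, which is the case in each hunk below.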
diff --git a/api/src/main/java/org/apache/iceberg/metrics/FixedReservoirHistogram.java b/api/src/main/java/org/apache/iceberg/metrics/FixedReservoirHistogram.java
index 4a8d638cd9..b85071325d 100644
--- a/api/src/main/java/org/apache/iceberg/metrics/FixedReservoirHistogram.java
+++ b/api/src/main/java/org/apache/iceberg/metrics/FixedReservoirHistogram.java
@@ -66,10 +66,10 @@ public class FixedReservoirHistogram implements Histogram {
double sum = 0.0d;
double sumSquares = 0.0d;
- for (int i = 0; i < values.length; ++i) {
- sum += values[i];
+ for (long x : values) {
+ sum += x;
// Convert to double value to avoid potential overflow of square
- double value = (double) values[i];
+ double value = (double) x;
sumSquares += value * value;
}
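The retained comment ("Convert to double value to avoid potential overflow of square") is worth a concrete illustration: squaring a large long can exceed Long.MAX_VALUE (about 9.22e18) and wrap to a negative number, while the same product computed in double remains representable. A small sketch, with an arbitrary value chosen only to trigger the overflow:

    long v = 4_000_000_000L;                          // fits comfortably in a long
    long squareAsLong = v * v;                        // 1.6e19 overflows long and wraps negative
    double squareAsDouble = (double) v * (double) v;  // 1.6E19, representable as a double

Casting to double before squaring, via the intermediate variable in the loop body above, avoids exactly this wraparound.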
diff --git a/api/src/main/java/org/apache/iceberg/types/JavaHashes.java b/api/src/main/java/org/apache/iceberg/types/JavaHashes.java
index f495a50a73..c251989900 100644
--- a/api/src/main/java/org/apache/iceberg/types/JavaHashes.java
+++ b/api/src/main/java/org/apache/iceberg/types/JavaHashes.java
@@ -109,10 +109,9 @@ public class JavaHashes {
}
int result = 17;
- int len = list.size();
- result = 37 * result + len;
- for (int i = 0; i < len; i += 1) {
- result = 37 * result + elementHash.hash(list.get(i));
+ result = 37 * result + list.size();
+ for (Object o : list) {
+ result = 37 * result + elementHash.hash(o);
}
return result;
}
diff --git a/core/src/jmh/java/org/apache/iceberg/util/ZOrderByteUtilsBenchmark.java b/core/src/jmh/java/org/apache/iceberg/util/ZOrderByteUtilsBenchmark.java
index 9221a36b13..b65a12899d 100644
--- a/core/src/jmh/java/org/apache/iceberg/util/ZOrderByteUtilsBenchmark.java
+++ b/core/src/jmh/java/org/apache/iceberg/util/ZOrderByteUtilsBenchmark.java
@@ -75,9 +75,9 @@ public class ZOrderByteUtilsBenchmark {
int outputSize = ZOrderByteUtils.PRIMITIVE_BUFFER_SIZE * 4;
ByteBuffer outputBuffer = ByteBuffer.allocate(outputSize);
- for (int i = 0; i < fourColumnInput.length; i++) {
+ for (byte[][] columnsBinary : fourColumnInput) {
byte[] interleavedBytes =
- ZOrderByteUtils.interleaveBits(fourColumnInput[i], outputSize, outputBuffer);
+ ZOrderByteUtils.interleaveBits(columnsBinary, outputSize, outputBuffer);
blackhole.consume(interleavedBytes);
}
}
@@ -114,9 +114,9 @@ public class ZOrderByteUtilsBenchmark {
int outputSize = 8;
ByteBuffer outputBuffer = ByteBuffer.allocate(outputSize);
- for (int i = 0; i < fourColumnInput.length; i++) {
+ for (byte[][] columnsBinary : fourColumnInput) {
byte[] interleavedBytes =
- ZOrderByteUtils.interleaveBits(fourColumnInput[i], outputSize, outputBuffer);
+ ZOrderByteUtils.interleaveBits(columnsBinary, outputSize, outputBuffer);
blackhole.consume(interleavedBytes);
}
}
diff --git a/core/src/main/java/org/apache/iceberg/util/ParallelIterable.java b/core/src/main/java/org/apache/iceberg/util/ParallelIterable.java
index 16a8b7110e..108757b415 100644
--- a/core/src/main/java/org/apache/iceberg/util/ParallelIterable.java
+++ b/core/src/main/java/org/apache/iceberg/util/ParallelIterable.java
@@ -85,8 +85,7 @@ public class ParallelIterable<T> extends CloseableGroup implements CloseableIter
this.closed = true;
// cancel background tasks
- for (int i = 0; i < taskFutures.length; i += 1) {
- Future<?> taskFuture = taskFutures[i];
+ for (Future<?> taskFuture : taskFutures) {
if (taskFuture != null && !taskFuture.isDone()) {
taskFuture.cancel(true);
}
diff --git a/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java b/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
index c4d6e713bb..92ca284b12 100644
--- a/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
+++ b/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
@@ -66,8 +66,8 @@ class DeltaManifestsSerializer implements SimpleVersionedSerializer<DeltaManifes
CharSequence[] referencedDataFiles = deltaManifests.referencedDataFiles();
out.writeInt(referencedDataFiles.length);
- for (int i = 0; i < referencedDataFiles.length; i++) {
- out.writeUTF(referencedDataFiles[i].toString());
+ for (CharSequence referencedDataFile : referencedDataFiles) {
+ out.writeUTF(referencedDataFile.toString());
}
return binaryOut.toByteArray();
diff --git a/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
index a6adc02ff6..9998bee99f 100644
--- a/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
+++ b/flink/v1.14/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
@@ -175,8 +175,8 @@ public class IcebergEnumeratorStateSerializer
DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException {
out.writeInt(enumerationSplitCountHistory.length);
if (enumerationSplitCountHistory.length > 0) {
- for (int i = 0; i < enumerationSplitCountHistory.length; ++i) {
- out.writeInt(enumerationSplitCountHistory[i]);
+ for (int enumerationSplitCount : enumerationSplitCountHistory) {
+ out.writeInt(enumerationSplitCount);
}
}
}
diff --git a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
index b6c785cb14..79be96d209 100644
--- a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
+++ b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
@@ -219,8 +219,8 @@ public class TestRowDataPartitionKey {
List<Record> records = RandomGenericData.generate(nestedSchema, 10, 1994);
List<RowData> rows = Lists.newArrayList(RandomRowData.convert(nestedSchema, records));
- for (int i = 0; i < SUPPORTED_PRIMITIVES.size(); i++) {
- String column = String.format("nested.%s", SUPPORTED_PRIMITIVES.get(i));
+ for (String supportedPrimitive : SUPPORTED_PRIMITIVES) {
+ String column = String.format("nested.%s", supportedPrimitive);
PartitionSpec spec = PartitionSpec.builderFor(nestedSchema).identity(column).build();
Class<?>[] javaClasses = spec.javaClasses();
diff --git a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
index b2ccbff2d6..f2e89428a9 100644
--- a/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
+++ b/flink/v1.14/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
@@ -107,10 +107,10 @@ public class ReaderUtil {
GenericAppenderFactory appenderFactory)
throws IOException {
List<FileScanTask> fileTasks = Lists.newArrayListWithCapacity(recordBatchList.size());
- for (int i = 0; i < recordBatchList.size(); ++i) {
+ for (List<Record> recordBatch : recordBatchList) {
FileScanTask fileTask =
ReaderUtil.createFileTask(
- recordBatchList.get(i), temporaryFolder.newFile(), fileFormat, appenderFactory);
+ recordBatch, temporaryFolder.newFile(), fileFormat, appenderFactory);
fileTasks.add(fileTask);
}
diff --git a/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java b/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
index c4d6e713bb..92ca284b12 100644
--- a/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
+++ b/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
@@ -66,8 +66,8 @@ class DeltaManifestsSerializer implements SimpleVersionedSerializer<DeltaManifes
CharSequence[] referencedDataFiles = deltaManifests.referencedDataFiles();
out.writeInt(referencedDataFiles.length);
- for (int i = 0; i < referencedDataFiles.length; i++) {
- out.writeUTF(referencedDataFiles[i].toString());
+ for (CharSequence referencedDataFile : referencedDataFiles) {
+ out.writeUTF(referencedDataFile.toString());
}
return binaryOut.toByteArray();
diff --git a/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
index a6adc02ff6..9998bee99f 100644
--- a/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
+++ b/flink/v1.15/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
@@ -175,8 +175,8 @@ public class IcebergEnumeratorStateSerializer
DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException {
out.writeInt(enumerationSplitCountHistory.length);
if (enumerationSplitCountHistory.length > 0) {
- for (int i = 0; i < enumerationSplitCountHistory.length; ++i) {
- out.writeInt(enumerationSplitCountHistory[i]);
+ for (int enumerationSplitCount : enumerationSplitCountHistory) {
+ out.writeInt(enumerationSplitCount);
}
}
}
diff --git a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
index b6c785cb14..79be96d209 100644
--- a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
+++ b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
@@ -219,8 +219,8 @@ public class TestRowDataPartitionKey {
List<Record> records = RandomGenericData.generate(nestedSchema, 10, 1994);
List<RowData> rows = Lists.newArrayList(RandomRowData.convert(nestedSchema, records));
- for (int i = 0; i < SUPPORTED_PRIMITIVES.size(); i++) {
- String column = String.format("nested.%s", SUPPORTED_PRIMITIVES.get(i));
+ for (String supportedPrimitive : SUPPORTED_PRIMITIVES) {
+ String column = String.format("nested.%s", supportedPrimitive);
PartitionSpec spec = PartitionSpec.builderFor(nestedSchema).identity(column).build();
Class<?>[] javaClasses = spec.javaClasses();
diff --git a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
index b2ccbff2d6..f2e89428a9 100644
--- a/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
+++ b/flink/v1.15/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
@@ -107,10 +107,10 @@ public class ReaderUtil {
GenericAppenderFactory appenderFactory)
throws IOException {
List<FileScanTask> fileTasks = Lists.newArrayListWithCapacity(recordBatchList.size());
- for (int i = 0; i < recordBatchList.size(); ++i) {
+ for (List<Record> recordBatch : recordBatchList) {
FileScanTask fileTask =
ReaderUtil.createFileTask(
- recordBatchList.get(i), temporaryFolder.newFile(), fileFormat, appenderFactory);
+ recordBatch, temporaryFolder.newFile(), fileFormat, appenderFactory);
fileTasks.add(fileTask);
}
diff --git a/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java b/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
index c4d6e713bb..92ca284b12 100644
--- a/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
+++ b/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/sink/DeltaManifestsSerializer.java
@@ -66,8 +66,8 @@ class DeltaManifestsSerializer implements SimpleVersionedSerializer<DeltaManifes
CharSequence[] referencedDataFiles = deltaManifests.referencedDataFiles();
out.writeInt(referencedDataFiles.length);
- for (int i = 0; i < referencedDataFiles.length; i++) {
- out.writeUTF(referencedDataFiles[i].toString());
+ for (CharSequence referencedDataFile : referencedDataFiles) {
+ out.writeUTF(referencedDataFile.toString());
}
return binaryOut.toByteArray();
diff --git a/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
index a6adc02ff6..9998bee99f 100644
--- a/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
+++ b/flink/v1.16/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java
@@ -175,8 +175,8 @@ public class IcebergEnumeratorStateSerializer
DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException {
out.writeInt(enumerationSplitCountHistory.length);
if (enumerationSplitCountHistory.length > 0) {
- for (int i = 0; i < enumerationSplitCountHistory.length; ++i) {
- out.writeInt(enumerationSplitCountHistory[i]);
+ for (int enumerationSplitCount : enumerationSplitCountHistory) {
+ out.writeInt(enumerationSplitCount);
}
}
}
diff --git a/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java b/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
index b6c785cb14..79be96d209 100644
--- a/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
+++ b/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/sink/TestRowDataPartitionKey.java
@@ -219,8 +219,8 @@ public class TestRowDataPartitionKey {
List<Record> records = RandomGenericData.generate(nestedSchema, 10, 1994);
List<RowData> rows = Lists.newArrayList(RandomRowData.convert(nestedSchema, records));
- for (int i = 0; i < SUPPORTED_PRIMITIVES.size(); i++) {
- String column = String.format("nested.%s", SUPPORTED_PRIMITIVES.get(i));
+ for (String supportedPrimitive : SUPPORTED_PRIMITIVES) {
+ String column = String.format("nested.%s", supportedPrimitive);
PartitionSpec spec = PartitionSpec.builderFor(nestedSchema).identity(column).build();
Class<?>[] javaClasses = spec.javaClasses();
diff --git a/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java b/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
index b2ccbff2d6..f2e89428a9 100644
--- a/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
+++ b/flink/v1.16/flink/src/test/java/org/apache/iceberg/flink/source/reader/ReaderUtil.java
@@ -107,10 +107,10 @@ public class ReaderUtil {
GenericAppenderFactory appenderFactory)
throws IOException {
List<FileScanTask> fileTasks = Lists.newArrayListWithCapacity(recordBatchList.size());
- for (int i = 0; i < recordBatchList.size(); ++i) {
+ for (List<Record> recordBatch : recordBatchList) {
FileScanTask fileTask =
ReaderUtil.createFileTask(
- recordBatchList.get(i), temporaryFolder.newFile(), fileFormat, appenderFactory);
+ recordBatch, temporaryFolder.newFile(), fileFormat, appenderFactory);
fileTasks.add(fileTask);
}
diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java
index 7d795b7598..c1f76e7bdb 100644
--- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java
+++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java
@@ -727,8 +727,8 @@ public class ParquetValueReaders {
@Override
public final void setPageSource(PageReadStore pageStore, long rowPosition) {
- for (int i = 0; i < readers.length; i += 1) {
- readers[i].setPageSource(pageStore, rowPosition);
+ for (ParquetValueReader<?> reader : readers) {
+ reader.setPageSource(pageStore, rowPosition);
}
}
diff --git a/spark/v2.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java b/spark/v2.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
index 6a8c7f1d3c..60868b8700 100644
--- a/spark/v2.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
+++ b/spark/v2.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
@@ -134,8 +134,8 @@ public class SparkOrcWriter implements OrcRowWriter<InternalRow> {
super(writers);
this.fieldGetters = Lists.newArrayListWithExpectedSize(orcTypes.size());
- for (int i = 0; i < orcTypes.size(); i++) {
- fieldGetters.add(createFieldGetter(orcTypes.get(i)));
+ for (TypeDescription orcType : orcTypes) {
+ fieldGetters.add(createFieldGetter(orcType));
}
}
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index ac284d4733..85ee155048 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -88,9 +88,9 @@ public class TestSparkParquetReader extends AvroDataTest {
.createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
.build()) {
Iterator<InternalRow> rows = reader.iterator();
- for (int i = 0; i < expected.size(); i += 1) {
+ for (GenericData.Record record : expected) {
Assert.assertTrue("Should have expected number of rows", rows.hasNext());
- assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
+ assertEqualsUnsafe(schema.asStruct(), record, rows.next());
}
Assert.assertFalse("Should not have extra rows", rows.hasNext());
}
diff --git a/spark/v3.1/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java b/spark/v3.1/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
index 6a8c7f1d3c..60868b8700 100644
--- a/spark/v3.1/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
+++ b/spark/v3.1/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
@@ -134,8 +134,8 @@ public class SparkOrcWriter implements OrcRowWriter<InternalRow> {
super(writers);
this.fieldGetters = Lists.newArrayListWithExpectedSize(orcTypes.size());
- for (int i = 0; i < orcTypes.size(); i++) {
- fieldGetters.add(createFieldGetter(orcTypes.get(i)));
+ for (TypeDescription orcType : orcTypes) {
+ fieldGetters.add(createFieldGetter(orcType));
}
}
diff --git a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index ac284d4733..85ee155048 100644
--- a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -88,9 +88,9 @@ public class TestSparkParquetReader extends AvroDataTest {
.createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
.build()) {
Iterator<InternalRow> rows = reader.iterator();
- for (int i = 0; i < expected.size(); i += 1) {
+ for (GenericData.Record record : expected) {
Assert.assertTrue("Should have expected number of rows", rows.hasNext());
- assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
+ assertEqualsUnsafe(schema.asStruct(), record, rows.next());
}
Assert.assertFalse("Should not have extra rows", rows.hasNext());
}
diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
index 6a8c7f1d3c..60868b8700 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
@@ -134,8 +134,8 @@ public class SparkOrcWriter implements OrcRowWriter<InternalRow> {
super(writers);
this.fieldGetters = Lists.newArrayListWithExpectedSize(orcTypes.size());
- for (int i = 0; i < orcTypes.size(); i++) {
- fieldGetters.add(createFieldGetter(orcTypes.get(i)));
+ for (TypeDescription orcType : orcTypes) {
+ fieldGetters.add(createFieldGetter(orcType));
}
}
diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
index 5654ae3ed5..000499874b 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
@@ -38,8 +38,8 @@ public class NumDeletes implements CustomMetric {
@Override
public String aggregateTaskMetrics(long[] taskMetrics) {
long sum = initialValue;
- for (int i = 0; i < taskMetrics.length; i++) {
- sum += taskMetrics[i];
+ for (long taskMetric : taskMetrics) {
+ sum += taskMetric;
}
return NumberFormat.getIntegerInstance().format(sum);
diff --git a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
index b4f0fc7a44..41d7c1e8db 100644
--- a/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
+++ b/spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
@@ -36,8 +36,8 @@ public class NumSplits implements CustomMetric {
@Override
public String aggregateTaskMetrics(long[] taskMetrics) {
long sum = initialValue;
- for (int i = 0; i < taskMetrics.length; i++) {
- sum += taskMetrics[i];
+ for (long taskMetric : taskMetrics) {
+ sum += taskMetric;
}
return NumberFormat.getIntegerInstance().format(sum);
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index ac284d4733..85ee155048 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -88,9 +88,9 @@ public class TestSparkParquetReader extends AvroDataTest {
.createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
.build()) {
Iterator<InternalRow> rows = reader.iterator();
- for (int i = 0; i < expected.size(); i += 1) {
+ for (GenericData.Record record : expected) {
Assert.assertTrue("Should have expected number of rows", rows.hasNext());
- assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
+ assertEqualsUnsafe(schema.asStruct(), record, rows.next());
}
Assert.assertFalse("Should not have extra rows", rows.hasNext());
}
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
index 6a8c7f1d3c..60868b8700 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcWriter.java
@@ -134,8 +134,8 @@ public class SparkOrcWriter implements OrcRowWriter<InternalRow> {
super(writers);
this.fieldGetters = Lists.newArrayListWithExpectedSize(orcTypes.size());
- for (int i = 0; i < orcTypes.size(); i++) {
- fieldGetters.add(createFieldGetter(orcTypes.get(i)));
+ for (TypeDescription orcType : orcTypes) {
+ fieldGetters.add(createFieldGetter(orcType));
}
}
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
index 5654ae3ed5..000499874b 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumDeletes.java
@@ -38,8 +38,8 @@ public class NumDeletes implements CustomMetric {
@Override
public String aggregateTaskMetrics(long[] taskMetrics) {
long sum = initialValue;
- for (int i = 0; i < taskMetrics.length; i++) {
- sum += taskMetrics[i];
+ for (long taskMetric : taskMetrics) {
+ sum += taskMetric;
}
return NumberFormat.getIntegerInstance().format(sum);
diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
index b4f0fc7a44..41d7c1e8db 100644
--- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
+++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/metrics/NumSplits.java
@@ -36,8 +36,8 @@ public class NumSplits implements CustomMetric {
@Override
public String aggregateTaskMetrics(long[] taskMetrics) {
long sum = initialValue;
- for (int i = 0; i < taskMetrics.length; i++) {
- sum += taskMetrics[i];
+ for (long taskMetric : taskMetrics) {
+ sum += taskMetric;
}
return NumberFormat.getIntegerInstance().format(sum);
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
index 7dc697f9d1..024ce3a60c 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/TestSparkParquetReader.java
@@ -87,9 +87,9 @@ public class TestSparkParquetReader extends AvroDataTest {
.createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
.build()) {
Iterator<InternalRow> rows = reader.iterator();
- for (int i = 0; i < expected.size(); i += 1) {
+ for (GenericData.Record record : expected) {
Assert.assertTrue("Should have expected number of rows", rows.hasNext());
- assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
+ assertEqualsUnsafe(schema.asStruct(), record, rows.next());
}
Assert.assertFalse("Should not have extra rows", rows.hasNext());
}