szehon-ho commented on code in PR #6344: URL: https://github.com/apache/iceberg/pull/6344#discussion_r1051211196
########## spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java: ########## @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg.spark; + +import java.io.Serializable; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import org.apache.iceberg.ChangelogOperation; +import org.apache.iceberg.relocated.com.google.common.collect.Iterators; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.RowFactory; +import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; +import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema; + +/** + * An iterator that transforms rows from changelog tables within a single Spark task. It assumes + * that rows are sorted by identifier columns and change type. + * + * <p>It removes the carry-over rows. Carry-over rows are unchanged rows in a snapshot but showed as + * delete-rows and insert-rows in a changelog table due to the copy-on-write(COW) mechanism. For + * example, there are row1 (id=1, data='a') and row2 (id=2, data='b') in a data file, if we only + * delete row2, the COW will copy row1 to a new data file and delete the whole old data file. 
The + * changelog table will have two delete-rows(row1 and row2), and one insert-row(row1). Row1 is a + * carry-over row. + * + * <p>The iterator marks the delete-row and insert-row to be the update-rows. For example, these two + * rows + * + * <ul> + * <li>(id=1, data='a', op='DELETE') + * <li>(id=1, data='b', op='INSERT') + * </ul> + * + * <p>will be marked as update-rows: + * + * <ul> + * <li>(id=1, data='a', op='UPDATE_BEFORE') + * <li>(id=1, data='b', op='UPDATE_AFTER') + * </ul> + */ +public class ChangelogIterator implements Iterator<Row>, Serializable { + private static final String DELETE = ChangelogOperation.DELETE.name(); + private static final String INSERT = ChangelogOperation.INSERT.name(); + private static final String UPDATE_BEFORE = ChangelogOperation.UPDATE_BEFORE.name(); + private static final String UPDATE_AFTER = ChangelogOperation.UPDATE_AFTER.name(); + + private final Iterator<Row> rowIterator; + private final int changeTypeIndex; + private final List<Integer> partitionIdx; + + private Row cachedRow = null; + + private ChangelogIterator( + Iterator<Row> rowIterator, int changeTypeIndex, List<Integer> partitionIdx) { + this.rowIterator = rowIterator; + this.changeTypeIndex = changeTypeIndex; + this.partitionIdx = partitionIdx; + } + + public static Iterator<Row> iterator( + Iterator<Row> rowIterator, int changeTypeIndex, List<Integer> partitionIdx) { Review Comment: can we still change name from partitionIdx to identifierFieldIdx? Also a javadoc here on arguments will be useful. ########## spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/SparkTestBase.java: ########## @@ -112,11 +112,11 @@ protected List<Object[]> sql(String query, Object... 
args) { return rowsToJava(rows); } - protected List<Object[]> rowsToJava(List<Row> rows) { - return rows.stream().map(this::toJava).collect(Collectors.toList()); + public static List<Object[]> rowsToJava(List<Row> rows) { Review Comment: Style: I feel it's messy now to have this class as both an inherited base class and a util class. Especially some methods are changed to static and other methods are not, even though they call the static ones. What do you think? Maybe we can make a separate base class for the helper methods like 'SparkTestHelperBase' and have both SparkTestBase and your test inherit from it (to avoid changing all the tests)? ########## spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java: ########## @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iceberg.spark; + +import java.io.Serializable; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import org.apache.iceberg.ChangelogOperation; +import org.apache.iceberg.relocated.com.google.common.collect.Iterators; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.RowFactory; +import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; +import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema; + +/** + * An iterator that transforms rows from changelog tables within a single Spark task. It assumes + * that rows are sorted by identifier columns and change type. + * + * <p>It removes the carry-over rows. Carry-over rows are unchanged rows in a snapshot but showed as + * delete-rows and insert-rows in a changelog table due to the copy-on-write(COW) mechanism. For + * example, there are row1 (id=1, data='a') and row2 (id=2, data='b') in a data file, if we only + * delete row2, the COW will copy row1 to a new data file and delete the whole old data file. The Review Comment: Nit: 'whole' old file sounds strange; I would just omit it. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
