szehon-ho commented on code in PR #6344:
URL: https://github.com/apache/iceberg/pull/6344#discussion_r1046346355

##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########

```java
/**
 * An iterator that transforms rows from changelog tables within a single Spark task.
 *
 * <p>It sets carry-over rows to null so they can be filtered out later. Carry-over rows are rows
 * that are unchanged within a snapshot but appear as delete-rows and insert-rows in a changelog
 * table due to the copy-on-write (COW) mechanism. For example, given row1 (id=1, data='a') and
 * row2 (id=2, data='b') in a data file, if we delete only row2, COW copies row1 to a new data
 * file and deletes the whole old data file. The changelog table will then have two delete-rows
 * (row1 and row2) and one insert-row (row1); row1 is a carry-over row.
 *
 * <p>The iterator also marks matching delete-row/insert-row pairs as update-rows. For example,
 * these two rows
 *
 * <ul>
 *   <li>(id=1, data='a', op='DELETE')
 *   <li>(id=1, data='b', op='INSERT')
 * </ul>
 *
 * will be marked as update-rows:
 *
 * <ul>
 *   <li>(id=1, data='a', op='UPDATE_BEFORE')
 *   <li>(id=1, data='b', op='UPDATE_AFTER')
 * </ul>
 */
public class ChangelogIterator implements Iterator<Row>, Serializable {
  private static final String DELETE = ChangelogOperation.DELETE.name();
  private static final String INSERT = ChangelogOperation.INSERT.name();
  private static final String UPDATE_BEFORE = ChangelogOperation.UPDATE_BEFORE.name();
  private static final String UPDATE_AFTER = ChangelogOperation.UPDATE_AFTER.name();

  private final Iterator<Row> rowIterator;
  private final int changeTypeIndex;
  private final List<Integer> partitionIdx;

  private Row cachedRow = null;

  public ChangelogIterator(
      Iterator<Row> rowIterator, int changeTypeIndex, List<Integer> partitionIdx) {
    this.rowIterator = rowIterator;
    this.changeTypeIndex = changeTypeIndex;
    this.partitionIdx = partitionIdx;
  }

  @Override
  public boolean hasNext() {
    if (cachedRow != null) {
      return true;
    }
    return rowIterator.hasNext();
  }

  @Override
  public Row next() {
    // if there is an updated cached row, return it directly
    if (updated(cachedRow)) {
      Row row = cachedRow;
      cachedRow = null;
      return row;
    }

    Row currentRow = currentRow();

    if (rowIterator.hasNext()) {
      GenericRowWithSchema nextRow = (GenericRowWithSchema) rowIterator.next();
      cachedRow = nextRow;

      if (updateOrCarryoverRecord(currentRow, nextRow)) {
        Row[] rows = update((GenericRowWithSchema) currentRow, nextRow);

        currentRow = rows[0];
        cachedRow = rows[1];
      }
    }

    return currentRow;
  }

  private Row[] update(GenericRowWithSchema currentRow, GenericRowWithSchema nextRow) {
```

Review Comment:
   How about something like createUpdateChangelog() to be more descriptive? "update" sounds like it is updating something.
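
Not part of the PR: a minimal usage sketch of the iterator above, to make the javadoc's contract concrete. It assumes a task's rows arrive sorted so that the DELETE/INSERT halves of an update are adjacent; the wrapper class name, the column positions, and the null-filtering step are illustrative, not the PR's actual wiring.
```java
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterators;
import org.apache.spark.sql.Row;

public class ChangelogIteratorUsage {
  public static Iterator<Row> markUpdates(Iterator<Row> taskRows) {
    int changeTypeIndex = 2;                               // illustrative position of the change-type column
    List<Integer> identifierColumns = ImmutableList.of(0); // illustrative grouping column, e.g. id
    Iterator<Row> marked = new ChangelogIterator(taskRows, changeTypeIndex, identifierColumns);
    // carry-over rows come back as null, so drop them before returning
    return Iterators.filter(marked, Objects::nonNull);
  }
}
```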
##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########

```java
  private Row[] update(GenericRowWithSchema currentRow, GenericRowWithSchema nextRow) {
    GenericInternalRow deletedRow = new GenericInternalRow(currentRow.values());
    GenericInternalRow insertedRow = new GenericInternalRow(nextRow.values());

    if (isCarryoverRecord(deletedRow, insertedRow)) {
      // set carry-over rows to null for filtering out later
      return new Row[] {null, null};
    } else {
      deletedRow.update(changeTypeIndex, UPDATE_BEFORE);
      insertedRow.update(changeTypeIndex, UPDATE_AFTER);

      return new Row[] {
        RowFactory.create(deletedRow.values()), RowFactory.create(insertedRow.values())
      };
    }
  }

  private boolean isCarryoverRecord(GenericInternalRow deletedRow, GenericInternalRow insertedRow) {
    // set the change_type to the same value
    deletedRow.update(changeTypeIndex, "");
```

Review Comment:
   Style: I still don't prefer a side effect like this (mutating the argument inside the method) just for a comparison; the caller may be taken by surprise. One option is to construct deletedRow and insertedRow with the "" values to begin with, but I guess Java will be a bit verbose:
   ```java
   new GenericInternalRow(
       IntStream.range(0, row.values().length)
           .mapToObj(i -> (i == changeTypeIndex) ? "" : row.values()[i])
           .toArray())
   ```
   Another option is to move the update("") back to the base method:
   ```java
   GenericInternalRow deletedRow = new GenericInternalRow(currentRow.values());
   deletedRow.update(changeTypeIndex, "");
   ```
   At least in this case, when you read the update() method, it is clear what the state is without having to scroll to the helper methods.
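
Expanding the reviewer's first option into a compilable form (a sketch, not the PR's code; the class and helper names are hypothetical): copying the values with the change-type slot blanked lets the equality check run without any caller-visible mutation.
```java
import java.util.stream.IntStream;
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;

class RowCopies {
  // Hypothetical helper: copy the row's values with the change-type slot blanked,
  // so the carry-over comparison needs no side effect on the input rows.
  static GenericInternalRow withBlankChangeType(GenericRowWithSchema row, int changeTypeIndex) {
    Object[] values =
        IntStream.range(0, row.values().length)
            .mapToObj(i -> i == changeTypeIndex ? "" : row.values()[i])
            .toArray();
    return new GenericInternalRow(values);
  }
}
```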
"" : row.values()[i]) .toArray()) ``` Another option is to move the update("") back to the base method? ``` GenericInternalRow deletedRow = new GenericInternalRow(currentRow.values()); deletedRow.update(changeTypeIndex, ""); ``` At least in this case when you read the update() method, it is more clear what the state is without having to scroll to the helper methods. ########## spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java: ########## @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iceberg.spark; + +import java.io.Serializable; +import java.util.Iterator; +import java.util.List; +import org.apache.iceberg.ChangelogOperation; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.RowFactory; +import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; +import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema; + +/** + * An iterator that transforms rows from changelog tables within a single Spark task. + * + * <p>It marks the carry-over rows to null to for filtering out later. Carry-over rows are unchanged + * rows in a snapshot but showed as delete-rows and insert-rows in a changelog table due to the + * copy-on-write(COW) mechanism. For example, there are row1 (id=1, data='a') and row2 (id=2, Review Comment: Thanks for comment. Just wanted to check, why is it not possible for MOR? Data file + position delete of same row? ########## spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java: ########## @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########

```java
      if (withinPartition(currentRow, nextRow)
          && currentRow.getString(changeTypeIndex).equals(DELETE)
          && nextRow.getString(changeTypeIndex).equals(INSERT)) {
        // ...
      }

  // ...

  private boolean withinPartition(Row currentRow, Row nextRow) {
```

Review Comment:
   Are we OK with the name 'partition'? Won't it be overloaded in Iceberg?
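
The method body is not shown in this hunk, so the following is our guess at its intent, not the PR's code: two rows fall in the same "partition" when they agree on every grouping column in partitionIdx. That grouping is a Spark-side notion, not an Iceberg table partition, which is what makes the name read as overloaded.
```java
private boolean withinPartition(Row currentRow, Row nextRow) {
  for (int idx : partitionIdx) {
    if (!java.util.Objects.equals(currentRow.get(idx), nextRow.get(idx))) {
      return false; // rows belong to different groups; never pair them as an update
    }
  }
  return true;
}
```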
##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########

```java
  @Override
  public Row next() {
    // if there is an updated cached row, return it directly
    if (updated(cachedRow)) {
```

Review Comment:
   How about 'cachedUpdateChangelog'?


--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
