aokolnychyi commented on a change in pull request #2193:
URL: https://github.com/apache/iceberg/pull/2193#discussion_r568309937



##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlignUpdateTable.scala
##########
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UpdateTable}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
+
+case class AlignUpdateTable(conf: SQLConf) extends Rule[LogicalPlan] with AssignmentAlignmentSupport {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case u: UpdateTable if u.resolved =>
+      u.copy(assignments = alignAssignments(u.table, u.assignments))

Review comment:
       We apply the same assignment alignment logic as for MERGE.
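
       For illustration, a minimal standalone sketch of what the alignment does (a toy model, not the actual `AssignmentAlignmentSupport` implementation, which also has to deal with nested fields; the table and column names are hypothetical):

```scala
object AlignmentSketch extends App {
  case class Column(name: String)
  case class Assignment(target: String, value: String)

  // Toy alignment: emit one assignment per table column, in schema order,
  // defaulting columns the user did not mention to themselves.
  def alignAssignments(schema: Seq[Column], assignments: Seq[Assignment]): Seq[Assignment] =
    schema.map { col =>
      assignments.find(_.target == col.name)
        .getOrElse(Assignment(col.name, col.name))
    }

  // UPDATE t SET data = 'x' on a hypothetical table t(id, data, category)
  val aligned = alignAssignments(
    Seq(Column("id"), Column("data"), Column("category")),
    Seq(Assignment("data", "'x'")))
  println(aligned)
  // List(Assignment(id,id), Assignment(data,'x'), Assignment(category,category))
}
```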

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDelete.scala
##########
@@ -55,8 +54,6 @@ case class RewriteDelete(spark: SparkSession) extends Rule[LogicalPlan] with Rew
   import ExtendedDataSourceV2Implicits._
   import RewriteRowLevelOperationHelper._
 
-  override def resolver: Resolver = spark.sessionState.conf.resolver

Review comment:
       Moved this to parent as I need both `conf` and `resolver` now.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteMergeInto.scala
##########
@@ -224,22 +216,6 @@ case class RewriteMergeInto(spark: SparkSession) extends Rule[LogicalPlan] with
     }
     !(actions.size == 1 && hasUnconditionalDelete(actions.headOption))
   }
-
-  private def buildWritePlan(childPlan: LogicalPlan, table: Table): LogicalPlan = {

Review comment:
       Moved to parent.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?

Review comment:
       Right now, `UPDATE t SET ... WHERE false` will result in a job and a commit with no changes. We may want to handle such cases differently, but I am not sure it is a big deal.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned before
+    require(relation.output.size == assignments.size, "assignments must be aligned")
+
+    // Spark is going to execute the condition for each column but it seems we cannot avoid this

Review comment:
       Any ideas on whether we can avoid this are welcome. I did check the generated code, and the condition was evaluated for every column.
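
       To make this concrete, a toy sketch of the projection shape (plain strings instead of real Catalyst expressions; names are illustrative). The rule imports `Alias` and `If`, which suggests each updated column ends up as `Alias(If(cond, newValue, oldValue), name)`, so `cond` is repeated once per updated column and codegen evaluates it for each of them:

```scala
object UpdateProjectionSketch extends App {
  // Toy model of the update projection: pass unchanged columns through and
  // wrap each updated column in a conditional on the UPDATE's WHERE clause.
  def projectColumns(cond: String, cols: Seq[(String, Option[String])]): Seq[String] =
    cols.map {
      case (name, Some(newValue)) => s"if ($cond) $newValue else $name AS $name"
      case (name, None)           => name
    }

  // UPDATE t SET data = 'x' WHERE id > 10 on a hypothetical table t(id, data)
  println(projectColumns("id > 10", Seq("id" -> None, "data" -> Some("'x'"))))
  // List(id, if (id > 10) 'x' else data AS data)
}
```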

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned before
+    require(relation.output.size == assignments.size, "assignments must be aligned")
+
+    // Spark is going to execute the condition for each column but it seems we cannot avoid this

Review comment:
       Any ideas on whether we can avoid this are welcome. I did check the generated code, and the condition was evaluated for every column that is subject to change.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/utils/RewriteRowLevelOperationHelper.scala
##########
@@ -197,6 +203,22 @@ trait RewriteRowLevelOperationHelper extends PredicateHelper with Logging {
       }
     }
   }
+
+  protected def buildWritePlan(childPlan: LogicalPlan, table: Table): LogicalPlan = {

Review comment:
       Copied as it was.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")

Review comment:
       Yeah, I am already looking into this. I did not want to add a condition to the branch below so that it can stay as is.

##########
File path: 
spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import 
org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is 
optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with 
RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to 
false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not 
currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if 
isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", 
writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, 
mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, 
cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned 
before
+    require(relation.output.size == assignments.size, "assignments must be 
aligned")
+
+    // Spark is going to execute the condition for each column but it seems we 
cannot avoid this

Review comment:
       You mean using `MergeInto` node? Let me add a TODO item here. I think we 
would need to test this out at some reasonable scale.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeConditionsInRowLevelOperations.scala
##########
@@ -35,6 +35,10 @@ object OptimizeConditionsInRowLevelOperations extends Rule[LogicalPlan] {
         if !SubqueryExpression.hasSubquery(cond.getOrElse(Literal.TrueLiteral)) && isIcebergRelation(table) =>
       val optimizedCond = optimizeCondition(cond.getOrElse(Literal.TrueLiteral), table)
       d.copy(condition = Some(optimizedCond))
+    case u @ UpdateTable(table, _, cond)

Review comment:
       Took another look. Not sure, actually. We need both branches to get 
`cond`, the main shared logic is in a separate method but I'd welcome any ideas 
I could overlook. 
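
       To illustrate why the two branches collapse poorly, a self-contained toy of the shape (hypothetical stand-in types, not the real Catalyst nodes): `DeleteFromTable` and `UpdateTable` are unrelated case classes with different arities, so each needs its own pattern just to destructure `cond` before delegating to the shared helper.

```scala
object SharedConditionSketch extends App {
  // Hypothetical stand-ins for the two unrelated logical plan nodes:
  sealed trait Plan
  case class DeleteFromTable(table: String, condition: Option[String]) extends Plan
  case class UpdateTable(table: String, assignments: Seq[String], condition: Option[String]) extends Plan

  // Stand-in for the shared optimizeCondition helper mentioned above.
  def optimizeCondition(cond: String, table: String): String = s"optimized($cond)"

  // Two patterns are unavoidable: each one exists only to destructure `cond`.
  def apply(plan: Plan): Plan = plan match {
    case d @ DeleteFromTable(table, cond) =>
      d.copy(condition = Some(optimizeCondition(cond.getOrElse("true"), table)))
    case u @ UpdateTable(table, _, cond) =>
      u.copy(condition = Some(optimizeCondition(cond.getOrElse("true"), table)))
  }

  println(apply(UpdateTable("t", Seq("data = 'x'"), Some("id > 10"))))
  // UpdateTable(t,List(data = 'x'),Some(optimized(id > 10)))
}
```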

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlignUpdateTable.scala
##########
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UpdateTable}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
+
+case class AlignUpdateTable(conf: SQLConf) extends Rule[LogicalPlan] with AssignmentAlignmentSupport {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case u: UpdateTable if u.resolved =>

Review comment:
       Good catch, I forgot to update it.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned before
+    require(relation.output.size == assignments.size, "assignments must be aligned")
+
+    // Spark is going to execute the condition for each column but it seems we cannot avoid this

Review comment:
       @dilipbiswal @mehtaashish23 Do you have existing infra for benchmarks? Would it be easy for you to try this?

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlignUpdateTable.scala
##########
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UpdateTable}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
+
+case class AlignUpdateTable(conf: SQLConf) extends Rule[LogicalPlan] with AssignmentAlignmentSupport {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case u: UpdateTable if u.resolved =>

Review comment:
       Fixed.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeConditionsInRowLevelOperations.scala
##########
@@ -21,7 +21,7 @@ package org.apache.spark.sql.catalyst.optimizer
 
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, SubqueryExpression}
-import org.apache.spark.sql.catalyst.plans.logical.{DeleteFromTable, Filter, LocalRelation, LogicalPlan}
+import org.apache.spark.sql.catalyst.plans.logical.{DeleteFromTable, Filter, LocalRelation, LogicalPlan, UpdateTable}

Review comment:
       Fixed.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlignUpdateTable.scala
##########
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UpdateTable}

Review comment:
       Fixed.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned before
+    require(relation.output.size == assignments.size, "assignments must be aligned")
+
+    // Spark is going to execute the condition for each column but it seems we cannot avoid this

Review comment:
       @rdblue, yes, I can confirm your thoughts; I tried that.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlignUpdateTable.scala
##########
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, UpdateTable}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.internal.SQLConf
+
+case class AlignUpdateTable(conf: SQLConf) extends Rule[LogicalPlan] with AssignmentAlignmentSupport {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case u: UpdateTable if u.resolved =>
+      u.copy(assignments = alignAssignments(u.table, u.assignments))

Review comment:
       I have no preference. `AlignMergeIntoTable` is way more involved, but adding updates there would be alright.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized

Review comment:
       Copied and pasted from the delete rule; I will remove it.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/utils/RewriteRowLevelOperationHelper.scala
##########
@@ -197,6 +203,22 @@ trait RewriteRowLevelOperationHelper extends PredicateHelper with Logging {
       }
     }
   }
+
+  protected def buildWritePlan(childPlan: LogicalPlan, table: Table): LogicalPlan = {

Review comment:
       I think we may consider doing that too.

##########
File path: spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteUpdate.scala
##########
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.catalyst.optimizer
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Alias
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.If
+import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
+import org.apache.spark.sql.catalyst.plans.logical.Assignment
+import org.apache.spark.sql.catalyst.plans.logical.Filter
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.Project
+import org.apache.spark.sql.catalyst.plans.logical.ReplaceData
+import org.apache.spark.sql.catalyst.plans.logical.UpdateTable
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.catalyst.utils.RewriteRowLevelOperationHelper
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.execution.datasources.v2.ExtendedDataSourceV2Implicits
+
+// TODO: should be part of early scan push down after the delete condition is optimized
+case class RewriteUpdate(spark: SparkSession) extends Rule[LogicalPlan] with RewriteRowLevelOperationHelper {
+
+  import ExtendedDataSourceV2Implicits._
+
+  // TODO: can we do any better for no-op updates? when conditions evaluate to false/true?
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond))
+        if isIcebergRelation(r) && SubqueryExpression.hasSubquery(cond) =>
+      throw new AnalysisException("UPDATE statements with subqueries are not currently supported")
+
+    case UpdateTable(r: DataSourceV2Relation, assignments, Some(cond)) if isIcebergRelation(r) =>
+      val writeInfo = newWriteInfo(r.schema)
+      val mergeBuilder = r.table.asMergeable.newMergeBuilder("update", writeInfo)
+
+      val matchingRowsPlanBuilder = scanRelation => Filter(cond, scanRelation)
+      val scanPlan = buildDynamicFilterScanPlan(spark, r.table, r.output, mergeBuilder, cond, matchingRowsPlanBuilder)
+
+      val updateProjection = buildUpdateProjection(r, scanPlan, assignments, cond)
+
+      val mergeWrite = mergeBuilder.asWriteBuilder.buildForBatch()
+      val writePlan = buildWritePlan(updateProjection, r.table)
+      ReplaceData(r, mergeWrite, writePlan)
+  }
+
+  private def buildUpdateProjection(
+      relation: DataSourceV2Relation,
+      scanPlan: LogicalPlan,
+      assignments: Seq[Assignment],
+      cond: Expression): LogicalPlan = {
+
+    // this method relies on the fact that the assignments have been aligned before
+    require(relation.output.size == assignments.size, "assignments must be aligned")
+
+    // Spark is going to execute the condition for each column but it seems we cannot avoid this

Review comment:
       I meant: if we have an alternative implementation to what is currently proposed, would it be possible to run tests at some reasonable scale to see whether that alternative works better?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
