This is an automated email from the ASF dual-hosted git repository.
biyan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/master by this push:
new b2ef40ef94 [spark] Refactor DataSourceV2 fallback checks into reusable helper methods (#7130)
b2ef40ef94 is described below
commit b2ef40ef94b82cfff58667826a6357ddb1c739e4
Author: Kerwin Zhang <[email protected]>
AuthorDate: Tue Jan 27 17:24:41 2026 +0800
[spark] Refactor DataSourceV2 fallback checks into reusable helper methods (#7130)
---
.../catalyst/analysis/PaimonDeleteTable.scala | 15 --------------
.../spark/catalyst/analysis/RowLevelHelper.scala | 24 +++++++++++++++++++++-
2 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonDeleteTable.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonDeleteTable.scala
index 6808e64c45..46297ec7a6 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonDeleteTable.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonDeleteTable.scala
@@ -29,21 +29,6 @@ import org.apache.spark.sql.catalyst.rules.Rule
object PaimonDeleteTable extends Rule[LogicalPlan] with RowLevelHelper {
- /** Determines if DataSourceV2 delete is not supported for the given table. */
- private def shouldFallbackToV1Delete(table: SparkTable, condition: Expression): Boolean = {
- val baseTable = table.getTable
- org.apache.spark.SPARK_VERSION < "3.5" ||
- !baseTable.isInstanceOf[FileStoreTable] ||
- !baseTable.primaryKeys().isEmpty ||
- !table.useV2Write ||
- table.coreOptions.deletionVectorsEnabled() ||
- table.coreOptions.rowTrackingEnabled() ||
- table.coreOptions.dataEvolutionEnabled() ||
- OptimizeMetadataOnlyDeleteFromPaimonTable.isMetadataOnlyDelete(
- baseTable.asInstanceOf[FileStoreTable],
- condition)
- }
-
override val operation: RowLevelOp = Delete
override def apply(plan: LogicalPlan): LogicalPlan = {
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/RowLevelHelper.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/RowLevelHelper.scala
index eecf0542e1..b41ceed627 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/RowLevelHelper.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/RowLevelHelper.scala
@@ -18,7 +18,9 @@
package org.apache.paimon.spark.catalyst.analysis
-import org.apache.paimon.table.Table
+import org.apache.paimon.spark.SparkTable
+import org.apache.paimon.spark.catalyst.optimizer.OptimizeMetadataOnlyDeleteFromPaimonTable
+import org.apache.paimon.table.{FileStoreTable, Table}
import org.apache.spark.sql.catalyst.SQLConfHelper
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, BinaryExpression, EqualTo, Expression, SubqueryExpression}
@@ -73,4 +75,24 @@ trait RowLevelHelper extends SQLConfHelper {
case _ => false
}
}
+
+ /** Determines if DataSourceV2 is not supported for the given table. */
+ protected def shouldFallbackToV1(table: SparkTable): Boolean = {
+ val baseTable = table.getTable
+ org.apache.spark.SPARK_VERSION < "3.5" ||
+ !baseTable.isInstanceOf[FileStoreTable] ||
+ !baseTable.primaryKeys().isEmpty ||
+ !table.useV2Write ||
+ table.coreOptions.deletionVectorsEnabled() ||
+ table.coreOptions.rowTrackingEnabled() ||
+ table.coreOptions.dataEvolutionEnabled()
+ }
+
+ /** Determines if DataSourceV2 delete is not supported for the given table. */
+ protected def shouldFallbackToV1Delete(table: SparkTable, condition: Expression): Boolean = {
+ shouldFallbackToV1(table) ||
+ OptimizeMetadataOnlyDeleteFromPaimonTable.isMetadataOnlyDelete(
+ table.getTable.asInstanceOf[FileStoreTable],
+ condition)
+ }
}
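
For reference, a minimal sketch (not part of this commit) of how a rule that mixes in RowLevelHelper can now reuse the relocated check instead of re-implementing it; ExampleRowLevelRule and chooseDeletePath are hypothetical names introduced only for illustration:

package org.apache.paimon.spark.catalyst.analysis

import org.apache.paimon.spark.SparkTable

import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule

// Hypothetical example: any analysis rule mixing in RowLevelHelper can call the
// shared fallback checks rather than duplicating them per operation.
object ExampleRowLevelRule extends Rule[LogicalPlan] with RowLevelHelper {

  override val operation: RowLevelOp = Delete

  // No-op apply; a real rule such as PaimonDeleteTable pattern-matches
  // row-level commands here and rewrites the plan accordingly.
  override def apply(plan: LogicalPlan): LogicalPlan = plan

  // Route a DELETE to the V1 or V2 write path using the relocated helper.
  def chooseDeletePath(table: SparkTable, condition: Expression): String =
    if (shouldFallbackToV1Delete(table, condition)) "v1-delete" else "v2-delete"
}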