This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new db56793fbb [spark] Disable compaction for data evolution table (#6342)
db56793fbb is described below

commit db56793fbb2f580eb1f1090e8dbb0bb4f0c81e7c
Author: Juntao Zhang <[email protected]>
AuthorDate: Fri Oct 10 12:05:52 2025 +0800

    [spark] Disable compaction for data evolution table (#6342)
---
 docs/content/append-table/data-evolution.md               |  4 ++--
 .../apache/paimon/spark/procedure/CompactProcedure.java   |  3 +++
 .../org/apache/paimon/spark/sql/RowTrackingTestBase.scala | 15 +++++++++++++++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/docs/content/append-table/data-evolution.md b/docs/content/append-table/data-evolution.md
index 7d3ff96a11..3cdf67da7e 100644
--- a/docs/content/append-table/data-evolution.md
+++ b/docs/content/append-table/data-evolution.md
@@ -67,7 +67,7 @@ ON t.id = s.id
 WHEN MATCHED THEN UPDATE SET t.b = s.b
 WHEN NOT MATCHED THEN INSERT (id, b, c) VALUES (id, b, 0);
 
-SELECT * FROM my_table;
+SELECT * FROM target_table;
 +----+----+----+
 | id | b  | c  |
 +----+----+----+
@@ -80,7 +80,7 @@ This statement updates only the `b` column in the target table `target_table` ba
 `source_table`. The `id` column and `c` column remain unchanged, and new records are inserted with the specified values. The difference between this and table those are not enabled with data evolution is that only the `b` column data is written to new files.
 
 Note that: 
-* Data Evolution Table does not support 'Delete' statement yet.
+* Data Evolution Table does not support 'Delete', 'Update', or 'Compact' statement yet.
 * Merge Into for Data Evolution Table does not support 'WHEN NOT MATCHED BY SOURCE' clause.
 
 ## Spec
diff --git a/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/procedure/CompactProcedure.java b/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/procedure/CompactProcedure.java
index 7788a5a0be..2796b6deb6 100644
--- a/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/procedure/CompactProcedure.java
+++ b/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/procedure/CompactProcedure.java
@@ -191,6 +191,9 @@ public class CompactProcedure extends BaseProcedure {
                 tableIdent,
                 table -> {
                     checkArgument(table instanceof FileStoreTable);
+                    checkArgument(
+                            !((FileStoreTable) table).coreOptions().dataEvolutionEnabled(),
+                            "Compact operation is not supported when data evolution is enabled yet.");
                     checkArgument(
                             sortColumns.stream().noneMatch(table.partitionKeys()::contains),
                             "order_by should not contain partition cols, because it is meaningless, your order_by cols are %s, and partition cols are %s",
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/RowTrackingTestBase.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/RowTrackingTestBase.scala
index 422b51a10e..cda852f3a4 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/RowTrackingTestBase.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/RowTrackingTestBase.scala
@@ -340,6 +340,21 @@ abstract class RowTrackingTestBase extends PaimonSparkTestBase {
     }
   }
 
+  test("Data Evolution: compact table throws exception") {
+    withTable("t") {
+      sql(
+        "CREATE TABLE t (id INT, b INT) TBLPROPERTIES ('row-tracking.enabled' = 'true', 'data-evolution.enabled' = 'true')")
+      for (i <- 1 to 6) {
+        sql(s"INSERT INTO t VALUES ($i, $i)")
+      }
+      assert(
+        intercept[RuntimeException] {
+          sql("CALL sys.compact(table => 't')")
+        }.getMessage
+          .contains("Compact operation is not supported when data evolution is enabled yet."))
+    }
+  }
+
   test("Data Evolution: delete table throws exception") {
     withTable("t") {
       sql(
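
For reference, a minimal Spark SQL sketch of the behavior this change enforces. The table name `t`, schema, and table properties below mirror the new test case; the exact exception type surfaced to the client may vary, but the message is the one added in CompactProcedure:

    CREATE TABLE t (id INT, b INT) TBLPROPERTIES (
        'row-tracking.enabled' = 'true',
        'data-evolution.enabled' = 'true'
    );

    INSERT INTO t VALUES (1, 1);

    -- After this commit, the call below is rejected with:
    -- "Compact operation is not supported when data evolution is enabled yet."
    CALL sys.compact(table => 't');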
