This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch release-1.3
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/release-1.3 by this push:
new ca48db65a6 [hotfix] Fix compile error in CompactAction
ca48db65a6 is described below
commit ca48db65a61044b7b35f2cfe7ae7d213a808f878
Author: JingsongLi <[email protected]>
AuthorDate: Tue Oct 14 10:45:46 2025 +0200
[hotfix] Fix compile error in CompactAction
---
.../org/apache/paimon/flink/action/CompactAction.java | 16 ++--------------
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactAction.java b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactAction.java
index 4d514379ac..ce30ed5685 100644
--- a/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactAction.java
+++ b/paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/action/CompactAction.java
@@ -250,14 +250,7 @@ public class CompactAction extends TableActionBase {
LOGGER.info(
"No partition needs to be incrementally clustered. "
+ "Please set '--compact_strategy full' if you need to forcibly trigger the cluster.");
- if (this.forceStartFlinkJob) {
- env.fromSequence(0, 0)
- .name("Nothing to Cluster Source")
- .sinkTo(new DiscardingSink<>());
- return true;
- } else {
- return false;
- }
+ return false;
}
Map<BinaryRow, DataSplit[]> partitionSplits =
compactUnits.entrySet().stream()
@@ -307,14 +300,9 @@ public class CompactAction extends TableActionBase {
// 2.3 write and then reorganize the committable
// set parallelism to null, and it'll forward parallelism when doWrite()
RowAppendTableSink sink = new RowAppendTableSink(table, null, null, null);
- boolean blobAsDescriptor = table.coreOptions().blobAsDescriptor();
DataStream<Committable> clusterCommittable =
sink.doWrite(
- FlinkSinkBuilder.mapToInternalRow(
- sorted,
- table.rowType(),
- blobAsDescriptor,
- table.catalogEnvironment().catalogContext()),
+ FlinkSinkBuilder.mapToInternalRow(sorted, table.rowType()),
commitUser,
null)
.transform(