This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new a54d7c5a72 [hotfix] Remove useless method in AbstractFileStoreWrite
a54d7c5a72 is described below

commit a54d7c5a72eefd81b87d3c8904f6345afaffa6ed
Author: JingsongLi <[email protected]>
AuthorDate: Sun Jun 22 14:49:06 2025 +0800

    [hotfix] Remove useless method in AbstractFileStoreWrite
---
 .../org/apache/paimon/operation/AbstractFileStoreWrite.java  | 12 ++----------
 .../java/org/apache/paimon/operation/FileStoreWrite.java     |  4 ----
 2 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreWrite.java b/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreWrite.java
index a7bac3f12a..d477fc8e12 100644
--- a/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreWrite.java
+++ b/paimon-core/src/main/java/org/apache/paimon/operation/AbstractFileStoreWrite.java
@@ -427,11 +427,9 @@ public abstract class AbstractFileStoreWrite<T> implements FileStoreWrite<T> {
         Snapshot previousSnapshot =
                 ignorePreviousFiles ? null : snapshotManager.latestSnapshotFromFileSystem();
         List<DataFileMeta> restoreFiles = new ArrayList<>();
-        int totalBuckets;
+        int totalBuckets = numBuckets;
         if (previousSnapshot != null) {
             totalBuckets = scanExistingFileMetas(previousSnapshot, partition, bucket, restoreFiles);
-        } else {
-            totalBuckets = getDefaultBucketNum(partition);
         }
 
         IndexMaintainer<T> indexMaintainer =
@@ -478,7 +476,7 @@ public abstract class AbstractFileStoreWrite<T> implements FileStoreWrite<T> {
             List<DataFileMeta> existingFileMetas) {
         List<ManifestEntry> files =
                 scan.withSnapshot(snapshot).withPartitionBucket(partition, bucket).plan().files();
-        int totalBuckets = getDefaultBucketNum(partition);
+        int totalBuckets = numBuckets;
         for (ManifestEntry entry : files) {
             if (!ignoreNumBucketCheck && entry.totalBuckets() != numBuckets) {
                 String partInfo =
@@ -502,12 +500,6 @@ public abstract class AbstractFileStoreWrite<T> implements FileStoreWrite<T> {
         return totalBuckets;
     }
 
-    // TODO see comments on FileStoreWrite#withIgnoreNumBucketCheck for what is needed to support
-    //  writing partitions with different buckets
-    public int getDefaultBucketNum(BinaryRow partition) {
-        return numBuckets;
-    }
-
     private ExecutorService compactExecutor() {
         if (lazyCompactExecutor == null) {
             lazyCompactExecutor =
diff --git a/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreWrite.java b/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreWrite.java
index dd7c463061..71d42f8a66 100644
--- a/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreWrite.java
+++ b/paimon-core/src/main/java/org/apache/paimon/operation/FileStoreWrite.java
@@ -76,10 +76,6 @@ public interface FileStoreWrite<T> extends Restorable<List<FileStoreWrite.State<
     /**
      * Ignores the check that the written partition must have the same number of buckets with the
      * table option.
-     *
-     * <p>TODO: to support writing partitions with different total buckets, we'll also need a
-     * special {@link org.apache.paimon.table.sink.ChannelComputer} and {@link
-     * org.apache.paimon.table.sink.KeyAndBucketExtractor} to deal with different bucket numbers.
      */
     void withIgnoreNumBucketCheck(boolean ignoreNumBucketCheck);
 
