This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/master by this push:
new c4a7dde0b4 [core] Don't use avro in postpone bucket tables when schema contains unsupported types (#5733)
c4a7dde0b4 is described below
commit c4a7dde0b49bf54576cc74846c9ee087a3bd1133
Author: tsreaper <[email protected]>
AuthorDate: Wed Jun 11 17:26:01 2025 +0800
[core] Don't use avro in postpone bucket tables when schema contains unsupported types (#5733)
---
.../postpone/PostponeBucketFileStoreWrite.java | 11 +++++--
.../paimon/flink/PostponeBucketTableITCase.java | 34 ++++++++++++++++++++++
2 files changed, 43 insertions(+), 2 deletions(-)
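
The gist of the fix: before forcing the avro file format for postpone bucket writers, the constructor now probes whether the table schema can be expressed as an Avro schema at all, and silently keeps the previously configured format when the conversion throws. Below is a minimal standalone sketch of that probe-and-fallback pattern, assuming the two-argument AvroSchemaConverter.convertToSchema used in the diff and Paimon's RowType/DataTypes factories; chooseFileFormat is an illustrative helper name, not Paimon API.

    import org.apache.paimon.format.avro.AvroSchemaConverter;
    import org.apache.paimon.types.DataTypes;
    import org.apache.paimon.types.RowType;

    import java.util.HashMap;

    public class AvroFormatProbe {

        // Illustrative helper: pick "avro" only when the row type survives
        // conversion to an Avro schema; otherwise keep the caller's format.
        static String chooseFileFormat(RowType rowType, String configuredFormat) {
            try {
                // Throws for types Avro cannot represent (e.g. TIMESTAMP(9)).
                AvroSchemaConverter.convertToSchema(rowType, new HashMap<>());
                return "avro";
            } catch (Exception e) {
                // Mirrors the diff: swallow the error and fall back.
                return configuredFormat;
            }
        }

        public static void main(String[] args) {
            RowType millis = RowType.of(DataTypes.INT(), DataTypes.TIMESTAMP(3));
            RowType nanos = RowType.of(DataTypes.INT(), DataTypes.TIMESTAMP(9));
            System.out.println(chooseFileFormat(millis, "parquet")); // avro
            System.out.println(chooseFileFormat(nanos, "parquet"));  // parquet
        }
    }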
diff --git a/paimon-core/src/main/java/org/apache/paimon/postpone/PostponeBucketFileStoreWrite.java b/paimon-core/src/main/java/org/apache/paimon/postpone/PostponeBucketFileStoreWrite.java
index 0bc6c48470..ccd4ac6fe3 100644
--- a/paimon-core/src/main/java/org/apache/paimon/postpone/PostponeBucketFileStoreWrite.java
+++ b/paimon-core/src/main/java/org/apache/paimon/postpone/PostponeBucketFileStoreWrite.java
@@ -23,6 +23,7 @@ import org.apache.paimon.KeyValue;
 import org.apache.paimon.data.BinaryRow;
 import org.apache.paimon.deletionvectors.DeletionVectorsMaintainer;
 import org.apache.paimon.format.FileFormat;
+import org.apache.paimon.format.avro.AvroSchemaConverter;
 import org.apache.paimon.fs.FileIO;
 import org.apache.paimon.io.DataFileMeta;
 import org.apache.paimon.io.KeyValueFileWriterFactory;
@@ -40,6 +41,7 @@ import org.apache.paimon.utils.SnapshotManager;
 import javax.annotation.Nullable;

+import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ThreadLocalRandom;
@@ -71,8 +73,13 @@ public class PostponeBucketFileStoreWrite extends AbstractFileStoreWrite<KeyValu
         super(snapshotManager, scan, null, null, tableName, options, partitionType);
         Options newOptions = new Options(options.toMap());
-        // use avro for postpone bucket
-        newOptions.set(CoreOptions.FILE_FORMAT, "avro");
+        try {
+            // use avro for postpone bucket
+            AvroSchemaConverter.convertToSchema(schema.logicalRowType(), new HashMap<>());
+            newOptions.set(CoreOptions.FILE_FORMAT, "avro");
+        } catch (Exception e) {
+            // ignored, avro does not support certain types in schema
+        }
         newOptions.set(CoreOptions.METADATA_STATS_MODE, "none");
         // Each writer should have its unique prefix, so files from the same writer can be consumed
         // by the same compaction reader to keep the input order.
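
Design note: probing by actually invoking the converter keeps the check in lock-step with whatever the Avro writer can serialize, instead of maintaining a hand-written list of unsupported types; the cost is that the broad catch (Exception) also hides unrelated conversion failures. Avro's standard timestamp logical types stop at microsecond precision, which is presumably what rejects the TIMESTAMP(9) column exercised by the test below.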
diff --git a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/PostponeBucketTableITCase.java b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/PostponeBucketTableITCase.java
index 694ba66e50..cf319ac1c2 100644
--- a/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/PostponeBucketTableITCase.java
+++ b/paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/PostponeBucketTableITCase.java
@@ -629,6 +629,40 @@ public class PostponeBucketTableITCase extends AbstractTestBase {
                 .containsExactlyInAnyOrder("+I[1, 40]");
     }
+    @Test
+    public void testAvroUnsupportedTypes() throws Exception {
+        String warehouse = getTempDirPath();
+        TableEnvironment tEnv =
+                tableEnvironmentBuilder()
+                        .batchMode()
+                        .setConf(TableConfigOptions.TABLE_DML_SYNC, true)
+                        .build();
+
+        tEnv.executeSql(
+                "CREATE CATALOG mycat WITH (\n"
+                        + " 'type' = 'paimon',\n"
+                        + " 'warehouse' = '"
+                        + warehouse
+                        + "'\n"
+                        + ")");
+        tEnv.executeSql("USE CATALOG mycat");
+        tEnv.executeSql(
+                "CREATE TABLE T (\n"
+                        + " k INT,\n"
+                        + " v TIMESTAMP(9),\n"
+                        + " PRIMARY KEY (k) NOT ENFORCED\n"
+                        + ") WITH (\n"
+                        + " 'bucket' = '-2'\n"
+                        + ")");
+
+        tEnv.executeSql(
+                        "INSERT INTO T VALUES (1, TIMESTAMP '2025-06-11 16:35:45.123456789'), (2, CAST(NULL AS TIMESTAMP(9)))")
+                .await();
+        tEnv.executeSql("CALL sys.compact(`table` => 'default.T')").await();
+        assertThat(collect(tEnv.executeSql("SELECT * FROM T")))
+                .containsExactlyInAnyOrder("+I[1, 2025-06-11T16:35:45.123456789]", "+I[2, null]");
+    }
+
     private List<String> collect(TableResult result) throws Exception {
         List<String> ret = new ArrayList<>();
         try (CloseableIterator<Row> it = result.collect()) {
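
The test drives exactly the fallback path: with a nanosecond TIMESTAMP(9) column the postpone-bucket writer can no longer choose avro, yet the insert, the sys.compact call, and the final scan must all still succeed, including round-tripping the value 2025-06-11T16:35:45.123456789 and a NULL.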