hailin0 commented on code in PR #7529:
URL: https://github.com/apache/seatunnel/pull/7529#discussion_r1850123590


##########
seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java:
##########
@@ -107,6 +112,27 @@ public void testSourceParallelism(TestContainer container) 
throws Exception {
         Assertions.assertEquals(0, execResult.getExitCode());
     }
 
+    @TestTemplate
+    @DisabledOnContainer(
+            value = {},
+            type = {EngineType.SPARK, EngineType.FLINK},
+            disabledReason = "The multi-catalog does not currently support the 
Spark Flink engine")

Review Comment:
   Currently, Flink and Spark support multi-table reads, so this test should not be disabled for those engines.



##########
seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java:
##########
@@ -17,70 +17,217 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse.source;
 
+import org.apache.seatunnel.shade.com.typesafe.config.Config;
+import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory;
+
+import org.apache.seatunnel.api.common.PrepareFailException;
+import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode;
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
 import org.apache.seatunnel.api.source.Boundedness;
 import org.apache.seatunnel.api.source.SeaTunnelSource;
 import org.apache.seatunnel.api.source.SourceReader;
 import org.apache.seatunnel.api.source.SourceSplitEnumerator;
 import org.apache.seatunnel.api.source.SupportColumnProjection;
 import org.apache.seatunnel.api.source.SupportParallelism;
 import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.catalog.CatalogTableUtil;
+import org.apache.seatunnel.api.table.catalog.PhysicalColumn;
+import org.apache.seatunnel.api.table.catalog.TableIdentifier;
+import org.apache.seatunnel.api.table.catalog.TablePath;
+import org.apache.seatunnel.api.table.catalog.TableSchema;
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
 import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.common.config.CheckConfigUtil;
+import org.apache.seatunnel.common.constants.PluginType;
+import org.apache.seatunnel.common.utils.ExceptionUtils;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseCatalogConfig;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException;
 import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSourceState;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.util.TypeConvertUtil;
+
+import org.apache.commons.collections4.map.HashedMap;
 
+import com.clickhouse.client.ClickHouseClient;
+import com.clickhouse.client.ClickHouseException;
+import com.clickhouse.client.ClickHouseFormat;
 import com.clickhouse.client.ClickHouseNode;
+import com.clickhouse.client.ClickHouseResponse;
+import com.google.auto.service.AutoService;
 
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SERVER_TIME_ZONE;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SQL;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE_LIST;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE_PATH;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME;
 
+@AutoService(SeaTunnelSource.class)
 public class ClickhouseSource
         implements SeaTunnelSource<SeaTunnelRow, ClickhouseSourceSplit, 
ClickhouseSourceState>,
                 SupportParallelism,
                 SupportColumnProjection {
 
     private List<ClickHouseNode> servers;
-    private CatalogTable catalogTable;
-    private String sql;
+    private Map<TablePath, ClickhouseCatalogConfig> 
tableClickhouseCatalogConfigMap =
+            new HashedMap<>();
 
-    public ClickhouseSource(List<ClickHouseNode> servers, CatalogTable 
catalogTable, String sql) {
-        this.servers = servers;
-        this.catalogTable = catalogTable;
-        this.sql = sql;
-    }
+    private static final String defaultTablePath = "default";
 
     @Override
     public String getPluginName() {
         return "Clickhouse";
     }
 
+    @Override
+    public void prepare(Config config) throws PrepareFailException {
+        config =
+                config.withFallback(
+                        ConfigFactory.parseMap(
+                                Collections.singletonMap(
+                                        SERVER_TIME_ZONE.key(), 
SERVER_TIME_ZONE.defaultValue())));
+
+        Map<String, String> customConfig =
+                CheckConfigUtil.isValidParam(config, CLICKHOUSE_CONFIG.key())
+                        ? 
config.getObject(CLICKHOUSE_CONFIG.key()).entrySet().stream()
+                                .collect(
+                                        Collectors.toMap(
+                                                Map.Entry::getKey,
+                                                entry -> 
entry.getValue().unwrapped().toString()))
+                        : null;
+
+        servers =
+                ClickhouseUtil.createNodes(
+                        config.getString(HOST.key()),
+                        config.getString(DATABASE.key()),
+                        config.getString(SERVER_TIME_ZONE.key()),
+                        config.getString(USERNAME.key()),
+                        config.getString(PASSWORD.key()),
+                        customConfig);
+
+        ClickHouseNode currentServer =
+                
servers.get(ThreadLocalRandom.current().nextInt(servers.size()));
+
+        ReadonlyConfig readonlyConfig = ReadonlyConfig.fromConfig(config);
+        String sql = readonlyConfig.getOptional(SQL).orElse(null);
+
+        if (readonlyConfig.getOptional(TABLE_LIST).isPresent()) {
+            readonlyConfig.get(TABLE_LIST).stream()
+                    .map(ReadonlyConfig::fromMap)
+                    .forEach(
+                            conf -> {
+                                String confSql = conf.getOptional(SQL).get();
+                                SeaTunnelRowType clickhouseRowType =
+                                        getClickhouseRowType(currentServer, 
confSql);
+                                TablePath tablePath =
+                                        
TablePath.of(conf.getOptional(TABLE_PATH).orElse(""));
+                                CatalogTable catalogTable =
+                                        createCatalogTable(clickhouseRowType, 
tablePath);
+
+                                ClickhouseCatalogConfig 
clickhouseCatalogConfig =
+                                        new ClickhouseCatalogConfig();
+                                clickhouseCatalogConfig.setSql(confSql);
+                                
clickhouseCatalogConfig.setCatalogTable(catalogTable);
+                                tableClickhouseCatalogConfigMap.put(
+                                        tablePath, clickhouseCatalogConfig);
+                            });
+        } else {
+            SeaTunnelRowType clickhouseRowType = 
getClickhouseRowType(currentServer, sql);
+            CatalogTable catalogTable =
+                    CatalogTableUtil.getCatalogTable(defaultTablePath, 
clickhouseRowType);
+
+            ClickhouseCatalogConfig clickhouseCatalogConfig = new 
ClickhouseCatalogConfig();
+            clickhouseCatalogConfig.setCatalogTable(catalogTable);
+            clickhouseCatalogConfig.setSql(sql);
+            tableClickhouseCatalogConfigMap.put(
+                    TablePath.of(defaultTablePath), clickhouseCatalogConfig);
+        }
+    }
+
+    private CatalogTable createCatalogTable(SeaTunnelRowType rowType, 
TablePath tablePath) {

Review Comment:
   Why not use `CatalogTableUtil.convertDataTypeToCatalogTables()` instead of a hand-rolled conversion?



##########
seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java:
##########
@@ -107,6 +112,27 @@ public void testSourceParallelism(TestContainer container) 
throws Exception {
         Assertions.assertEquals(0, execResult.getExitCode());
     }
 
+    @TestTemplate
+    @DisabledOnContainer(
+            value = {},
+            type = {EngineType.SPARK, EngineType.FLINK},
+            disabledReason = "The multi-catalog does not currently support the 
Spark Flink engine")
+    public void testClickhouseMultiSource(TestContainer container) throws 
Exception {
+        Container.ExecResult execResult = 
container.executeJob(CLICKHOUSE_MULTI_LIST_TABLE_CONFIG);
+        Assertions.assertEquals(0, execResult.getExitCode());
+
+        Thread.sleep(3000);

Review Comment:
   Why is this sleep needed? A fixed delay makes the test flaky; prefer polling for the expected condition with a timeout.



##########
seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/source/ClickhouseSource.java:
##########
@@ -17,70 +17,217 @@
 
 package org.apache.seatunnel.connectors.seatunnel.clickhouse.source;
 
+import org.apache.seatunnel.shade.com.typesafe.config.Config;
+import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory;
+
+import org.apache.seatunnel.api.common.PrepareFailException;
+import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode;
+import org.apache.seatunnel.api.configuration.ReadonlyConfig;
 import org.apache.seatunnel.api.source.Boundedness;
 import org.apache.seatunnel.api.source.SeaTunnelSource;
 import org.apache.seatunnel.api.source.SourceReader;
 import org.apache.seatunnel.api.source.SourceSplitEnumerator;
 import org.apache.seatunnel.api.source.SupportColumnProjection;
 import org.apache.seatunnel.api.source.SupportParallelism;
 import org.apache.seatunnel.api.table.catalog.CatalogTable;
+import org.apache.seatunnel.api.table.catalog.CatalogTableUtil;
+import org.apache.seatunnel.api.table.catalog.PhysicalColumn;
+import org.apache.seatunnel.api.table.catalog.TableIdentifier;
+import org.apache.seatunnel.api.table.catalog.TablePath;
+import org.apache.seatunnel.api.table.catalog.TableSchema;
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
 import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.common.config.CheckConfigUtil;
+import org.apache.seatunnel.common.constants.PluginType;
+import org.apache.seatunnel.common.utils.ExceptionUtils;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseCatalogConfig;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.exception.ClickhouseConnectorException;
 import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.state.ClickhouseSourceState;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.util.ClickhouseUtil;
+import 
org.apache.seatunnel.connectors.seatunnel.clickhouse.util.TypeConvertUtil;
+
+import org.apache.commons.collections4.map.HashedMap;
 
+import com.clickhouse.client.ClickHouseClient;
+import com.clickhouse.client.ClickHouseException;
+import com.clickhouse.client.ClickHouseFormat;
 import com.clickhouse.client.ClickHouseNode;
+import com.clickhouse.client.ClickHouseResponse;
+import com.google.auto.service.AutoService;
 
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.CLICKHOUSE_CONFIG;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.DATABASE;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.HOST;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.PASSWORD;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SERVER_TIME_ZONE;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.SQL;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE_LIST;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.TABLE_PATH;
+import static 
org.apache.seatunnel.connectors.seatunnel.clickhouse.config.ClickhouseConfig.USERNAME;
 
+@AutoService(SeaTunnelSource.class)
 public class ClickhouseSource
         implements SeaTunnelSource<SeaTunnelRow, ClickhouseSourceSplit, 
ClickhouseSourceState>,
                 SupportParallelism,
                 SupportColumnProjection {
 
     private List<ClickHouseNode> servers;
-    private CatalogTable catalogTable;
-    private String sql;
+    private Map<TablePath, ClickhouseCatalogConfig> 
tableClickhouseCatalogConfigMap =
+            new HashedMap<>();

Review Comment:
   Why not use `HashMap` instead of Commons Collections' `HashedMap`? The JDK type avoids an unnecessary third-party dependency here.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to