ruanhang1993 commented on code in PR #3916:
URL: https://github.com/apache/flink-cdc/pull/3916#discussion_r2022315733


##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/factory/JdbcDataSinkFactory.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.factory;
+
+import org.apache.flink.cdc.common.configuration.ConfigOption;
+import org.apache.flink.cdc.common.configuration.Configuration;
+import org.apache.flink.cdc.common.factories.DataSinkFactory;
+import org.apache.flink.cdc.common.factories.FactoryHelper;
+import org.apache.flink.cdc.common.sink.DataSink;
+import org.apache.flink.cdc.connectors.base.utils.OptionUtils;
+import org.apache.flink.cdc.connectors.jdbc.config.JdbcSinkConfig;
+import org.apache.flink.cdc.connectors.jdbc.dialect.JdbcSinkDialectFactory;
+import org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions;
+import org.apache.flink.cdc.connectors.jdbc.sink.JdbcDataSink;
+import org.apache.flink.configuration.ConfigurationUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.ServiceConfigurationError;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECTION_POOL_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_MAX_RETRIES;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_TIMEOUT;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONN_URL;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.DRIVER_CLASS_NAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.JDBC_PROPERTIES_PROP_PREFIX;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.PASSWORD;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.SERVER_TIME_ZONE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.USERNAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_INTERVAL_MS;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_MAX_RETRIES;
+
+/** A {@link DataSinkFactory} for creating JDBC sinks. */
+public class JdbcDataSinkFactory implements DataSinkFactory {
+
+    public static final String IDENTIFIER = "jdbc";
+
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcDataSinkFactory.class);
+
+    @Override
+    public DataSink createDataSink(Context context) {
+        FactoryHelper.createFactoryHelper(this, context)
+                .validateExcept(JDBC_PROPERTIES_PROP_PREFIX);
+
+        // Construct JdbcSinkConfig from FactoryConfigurations
+        final Configuration config = context.getFactoryConfiguration();
+        JdbcSinkConfig.Builder<?> builder = new JdbcSinkConfig.Builder<>();
+
+        List<JdbcSinkDialectFactory<JdbcSinkConfig>> dialectFactories =
+                discoverDialectFactories(getClass().getClassLoader());
+        config.getOptional(CONN_URL).ifPresent(builder::connUrl);
+        config.getOptional(USERNAME).ifPresent(builder::username);
+        config.getOptional(PASSWORD).ifPresent(builder::password);
+
+        builder.serverTimeZone(config.getOptional(SERVER_TIME_ZONE).orElse("UTC"));
+        builder.connectTimeout(config.get(CONNECT_TIMEOUT));
+        builder.connectionPoolSize(config.get(CONNECTION_POOL_SIZE));
+        builder.connectMaxRetries(config.get(CONNECT_MAX_RETRIES));
+        builder.writeBatchIntervalMs(config.get(WRITE_BATCH_INTERVAL_MS));
+        builder.writeBatchSize(config.get(WRITE_BATCH_SIZE));
+        builder.writeMaxRetries(config.get(WRITE_MAX_RETRIES));
+        builder.driverClassName(config.get(DRIVER_CLASS_NAME));

Review Comment:
   Every `set` method of the builder should return an instance of this builder. 
   
   Then this part could be written as follows:
   ```java
   builder.url(config.get(URL))
       .password(config.get(PASSWORD))
       .username(config.get(USERNAME))
       ..... 
   ;
   ```
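   For reference, a minimal sketch of the fluent-builder pattern (field and method names are illustrative, not the PR's actual builder):
   ```java
   /** Sketch only: each setter returns this builder so calls can be chained. */
   public static class Builder {
       private String url;
       private String username;
       private String password;

       public Builder url(String url) {
           this.url = url;
           return this;
       }

       public Builder username(String username) {
           this.username = username;
           return this;
       }

       public Builder password(String password) {
           this.password = password;
           return this;
       }
   }
   ```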



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/options/JdbcSinkOptions.java:
##########
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.options;
+
+import org.apache.flink.cdc.common.configuration.ConfigOption;
+import org.apache.flink.cdc.common.configuration.ConfigOptions;
+import org.apache.flink.cdc.common.configuration.Configuration;
+
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+
+/** Configurations for the JDBC data sink. */
+public class JdbcSinkOptions {
+    public static final ConfigOption<String> CONN_URL =
+            ConfigOptions.key("conn.url")

Review Comment:
   ```suggestion
               ConfigOptions.key("url")
   ```
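   For context, the full definition after the rename might look like this (the description text is illustrative):
   ```java
   public static final ConfigOption<String> URL =
           ConfigOptions.key("url")
                   .stringType()
                   .noDefaultValue()
                   .withDescription("JDBC connection URL of the sink database.");
   ```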



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/sink/v2/JdbcWriter.java:
##########
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.sink.v2;
+
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.StatefulSink;
+import org.apache.flink.cdc.common.event.TableId;
+import org.apache.flink.cdc.common.schema.Schema;
+import org.apache.flink.cdc.connectors.jdbc.config.JdbcSinkConfig;
+import org.apache.flink.cdc.connectors.jdbc.dialect.JdbcSinkDialect;
+import org.apache.flink.cdc.connectors.jdbc.sink.utils.JsonWrapper;
+import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
+import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
+import org.apache.flink.connector.jdbc.datasource.connections.JdbcConnectionProvider;
+import org.apache.flink.connector.jdbc.internal.JdbcOutputFormat;
+import org.apache.flink.connector.jdbc.internal.JdbcOutputSerializer;
+import org.apache.flink.connector.jdbc.sink.writer.JdbcWriterState;
+
+import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.type.TypeReference;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/** Implementation class of the {@link StatefulSink.StatefulSinkWriter} interface. */
+public class JdbcWriter<IN> implements StatefulSink.StatefulSinkWriter<IN, JdbcWriterState> {
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcWriter.class);
+
+    private final JdbcExecutionOptions executionOptions;
+    private final JdbcConnectionProvider connectionProvider;
+    private final JdbcOutputSerializer<Object> outputSerializer;
+    private final RecordSerializationSchema<IN> serializationSchema;
+    private final JsonWrapper jsonWrapper;
+
+    private final JdbcSinkDialect dialect;
+    private final Map<TableId, RichJdbcOutputFormat> outputHandlers;
+
+    public JdbcWriter(
+            Sink.InitContext initContext,
+            JdbcExecutionOptions executionOptions,
+            JdbcConnectionProvider connectionProvider,
+            JdbcOutputSerializer<Object> outputSerializer,
+            RecordSerializationSchema<IN> serializationSchema,
+            JdbcSinkDialect dialect,
+            JdbcSinkConfig sinkConfig) {
+
+        checkNotNull(initContext, "initContext must be defined");
+        checkNotNull(executionOptions, "executionOptions must be defined");
+        checkNotNull(connectionProvider, "connectionProvider must be defined");
+        checkNotNull(outputSerializer, "outputSerializer must be defined");
+        checkNotNull(serializationSchema, "serializationSchema must be defined");
+        checkNotNull(sinkConfig, "sinkConfig must be defined");
+
+        this.jsonWrapper = new JsonWrapper();
+        this.executionOptions = executionOptions;
+        this.connectionProvider = connectionProvider;
+        this.outputSerializer = outputSerializer;
+        this.serializationSchema = serializationSchema;
+        this.dialect = dialect;
+        this.outputHandlers = new ConcurrentHashMap<>();
+    }
+
+    @Override
+    public List<JdbcWriterState> snapshotState(long checkpointId) {
+        // Jdbc sink supports at-least-once semantics only. No state snapshotting & restoring
+        // required.
+        return Collections.emptyList();
+    }
+
+    @Override
+    public void write(IN event, Context context) throws IOException {
+        RichJdbcRowData rowData = serializationSchema.serialize(event);
+        if (rowData == null) {
+            return;
+        }
+
+        TableId tableId = rowData.getTableId();
+        if (RowKind.SCHEMA_CHANGE.is(rowData.getRowKind())) {
+            // All previous outputHandlers would expire after schema changes.
+            flush(false);
+            Optional.ofNullable(outputHandlers.remove(tableId)).ifPresent(JdbcOutputFormat::close);
+        } else {
+            RichJdbcOutputFormat outputFormat = getOrCreateHandler(tableId, rowData.getSchema());
+            outputFormat.writeRecord(rowData);
+            if (!rowData.hasPrimaryKey()) {
+                // For non-PK table, we must flush immediately to avoid data consistency issues.
+                outputFormat.flush();
+            }
+        }
+    }
+
+    private RichJdbcOutputFormat getJdbcOutputFormat(
+            String upsertSql,
+            String deleteSql,
+            JdbcStatementBuilder<RichJdbcRowData> upsertStmtBuilder,
+            JdbcStatementBuilder<RichJdbcRowData> deleteStmtBuilder) {
+        RichJdbcOutputFormat jdbcOutputFormat =
+                new RichJdbcOutputFormat(
+                        connectionProvider,
+                        executionOptions,
+                        () ->
+                                new BatchedStatementExecutor(
+                                        upsertSql,
+                                        deleteSql,
+                                        upsertStmtBuilder,
+                                        deleteStmtBuilder));
+        try {
+            jdbcOutputFormat.open(outputSerializer);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return jdbcOutputFormat;
+    }
+
+    @Override
+    public void flush(boolean endOfInput) throws IOException {
+        for (RichJdbcOutputFormat handler : outputHandlers.values()) {
+            handler.flush();
+            if (endOfInput) {
+                handler.close();
+            }
+        }
+    }
+
+    @Override
+    public void close() throws Exception {
+        flush(true);
+    }
+
+    private RichJdbcOutputFormat getOrCreateHandler(TableId tableId, Schema schema) {
+        if (outputHandlers.containsKey(tableId)) {
+            return outputHandlers.get(tableId);
+        }
+
+        String upsertStmt = dialect.getUpsertStatement(tableId, schema);

Review Comment:
   What if the dialect does not support the upsert statement?
   Please refer to the method `org.apache.flink.connector.jdbc.core.table.sink.JdbcOutputFormat#build` and add support for systems without an upsert statement.
   
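   A rough sketch of that shape, assuming a hypothetical `supportsUpsert()` capability flag and `getInsertIntoStatement(...)` fallback on the dialect:
   ```java
   // Sketch: fall back when the dialect has no native upsert statement,
   // analogous to what JdbcOutputFormat#build does in flink-connector-jdbc.
   String upsertStmt;
   if (dialect.supportsUpsert()) { // hypothetical capability flag
       upsertStmt = dialect.getUpsertStatement(tableId, schema);
   } else {
       // Hypothetical fallback: plain INSERT, with de-duplication handled by
       // a buffer-reduced executor (see the comment on BatchedStatementExecutor below).
       upsertStmt = dialect.getInsertIntoStatement(tableId, schema);
   }
   ```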



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/factory/JdbcDataSinkFactory.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.factory;
+
+import org.apache.flink.cdc.common.configuration.ConfigOption;
+import org.apache.flink.cdc.common.configuration.Configuration;
+import org.apache.flink.cdc.common.factories.DataSinkFactory;
+import org.apache.flink.cdc.common.factories.FactoryHelper;
+import org.apache.flink.cdc.common.sink.DataSink;
+import org.apache.flink.cdc.connectors.base.utils.OptionUtils;
+import org.apache.flink.cdc.connectors.jdbc.config.JdbcSinkConfig;
+import org.apache.flink.cdc.connectors.jdbc.dialect.JdbcSinkDialectFactory;
+import org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions;
+import org.apache.flink.cdc.connectors.jdbc.sink.JdbcDataSink;
+import org.apache.flink.configuration.ConfigurationUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.ServiceConfigurationError;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECTION_POOL_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_MAX_RETRIES;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_TIMEOUT;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONN_URL;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.DRIVER_CLASS_NAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.JDBC_PROPERTIES_PROP_PREFIX;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.PASSWORD;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.SERVER_TIME_ZONE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.USERNAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_INTERVAL_MS;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_MAX_RETRIES;
+
+/** A {@link DataSinkFactory} for creating JDBC sinks. */
+public class JdbcDataSinkFactory implements DataSinkFactory {
+
+    public static final String IDENTIFIER = "jdbc";
+
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcDataSinkFactory.class);
+
+    @Override
+    public DataSink createDataSink(Context context) {
+        FactoryHelper.createFactoryHelper(this, context)
+                .validateExcept(JDBC_PROPERTIES_PROP_PREFIX);
+
+        // Construct JdbcSinkConfig from FactoryConfigurations
+        final Configuration config = context.getFactoryConfiguration();
+        JdbcSinkConfig.Builder<?> builder = new JdbcSinkConfig.Builder<>();
+
+        List<JdbcSinkDialectFactory<JdbcSinkConfig>> dialectFactories =
+                discoverDialectFactories(getClass().getClassLoader());
+        config.getOptional(CONN_URL).ifPresent(builder::connUrl);
+        config.getOptional(USERNAME).ifPresent(builder::username);
+        config.getOptional(PASSWORD).ifPresent(builder::password);

Review Comment:
   `url`, `username`, and `password` are required options, so we could just use the `get` method.
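   For example, assuming the existing builder methods (sketch only):
   ```java
   // Required options are validated by the FactoryHelper, so `get` can be used
   // directly instead of `getOptional(...).ifPresent(...)`.
   builder.connUrl(config.get(CONN_URL));
   builder.username(config.get(USERNAME));
   builder.password(config.get(PASSWORD));
   ```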



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/sink/v2/BatchedStatementExecutor.java:
##########
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.sink.v2;
+
+import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
+import org.apache.flink.connector.jdbc.internal.executor.JdbcBatchStatementExecutor;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+/** A batched statement executor of {@link RichJdbcRowData}. */
+public class BatchedStatementExecutor implements JdbcBatchStatementExecutor<RichJdbcRowData> {

Review Comment:
   To support systems without an upsert statement, maybe we could split this executor into two parts, an upsertExecutor and a deleteExecutor, like `TableBufferReducedStatementExecutor` in `flink-connector-jdbc`.
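   A hedged sketch of that split, loosely following `TableBufferReducedStatementExecutor` (the class name, plus the `isUpsert()` and `getPrimaryKey()` accessors on `RichJdbcRowData`, are assumptions for illustration):
   ```java
   import org.apache.flink.connector.jdbc.internal.executor.JdbcBatchStatementExecutor;

   import java.nio.ByteBuffer;
   import java.sql.Connection;
   import java.sql.SQLException;
   import java.util.AbstractMap;
   import java.util.LinkedHashMap;
   import java.util.Map;

   /** Sketch: reduce changes per key, then route deletes and upserts to separate executors. */
   public class SplitStatementExecutor implements JdbcBatchStatementExecutor<RichJdbcRowData> {
       private final JdbcBatchStatementExecutor<RichJdbcRowData> upsertExecutor;
       private final JdbcBatchStatementExecutor<RichJdbcRowData> deleteExecutor;
       // Last change per primary key wins; the Boolean marks upsert (true) vs delete (false).
       private final Map<ByteBuffer, AbstractMap.SimpleEntry<Boolean, RichJdbcRowData>> buffer =
               new LinkedHashMap<>();

       public SplitStatementExecutor(
               JdbcBatchStatementExecutor<RichJdbcRowData> upsertExecutor,
               JdbcBatchStatementExecutor<RichJdbcRowData> deleteExecutor) {
           this.upsertExecutor = upsertExecutor;
           this.deleteExecutor = deleteExecutor;
       }

       @Override
       public void prepareStatements(Connection connection) throws SQLException {
           upsertExecutor.prepareStatements(connection);
           deleteExecutor.prepareStatements(connection);
       }

       @Override
       public void addToBatch(RichJdbcRowData record) throws SQLException {
           buffer.put(
                   ByteBuffer.wrap(record.getPrimaryKey()), // assumed accessor
                   new AbstractMap.SimpleEntry<>(record.isUpsert(), record)); // assumed accessor
       }

       @Override
       public void executeBatch() throws SQLException {
           // Replay the reduced buffer into the two executors, then flush both;
           // executing deletes before upserts here is a design choice.
           for (AbstractMap.SimpleEntry<Boolean, RichJdbcRowData> change : buffer.values()) {
               (change.getKey() ? upsertExecutor : deleteExecutor).addToBatch(change.getValue());
           }
           deleteExecutor.executeBatch();
           upsertExecutor.executeBatch();
           buffer.clear();
       }

       @Override
       public void closeStatements() throws SQLException {
           upsertExecutor.closeStatements();
           deleteExecutor.closeStatements();
       }
   }
   ```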



##########
flink-cdc-composer/src/main/java/org/apache/flink/cdc/composer/flink/FlinkPipelineComposer.java:
##########
@@ -205,7 +205,11 @@ private void translate(StreamExecutionEnvironment env, PipelineDef pipelineDef)
 
         // Schema Operator -> Sink -> X
         sinkTranslator.translate(
-                pipelineDef.getSink(), stream, dataSink, schemaOperatorIDGenerator.generate());
+                pipelineDef.getSink(),
+                stream,
+                parallelism,
+                dataSink,
+                schemaOperatorIDGenerator.generate());

Review Comment:
   Should this change be introduced in this PR?



##########
flink-cdc-connect/flink-cdc-pipeline-connectors/flink-cdc-pipeline-connector-jdbc-parent/flink-cdc-pipeline-connector-jdbc-core/src/main/java/org/apache/flink/cdc/connectors/jdbc/factory/JdbcDataSinkFactory.java:
##########
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.cdc.connectors.jdbc.factory;
+
+import org.apache.flink.cdc.common.configuration.ConfigOption;
+import org.apache.flink.cdc.common.configuration.Configuration;
+import org.apache.flink.cdc.common.factories.DataSinkFactory;
+import org.apache.flink.cdc.common.factories.FactoryHelper;
+import org.apache.flink.cdc.common.sink.DataSink;
+import org.apache.flink.cdc.connectors.base.utils.OptionUtils;
+import org.apache.flink.cdc.connectors.jdbc.config.JdbcSinkConfig;
+import org.apache.flink.cdc.connectors.jdbc.dialect.JdbcSinkDialectFactory;
+import org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions;
+import org.apache.flink.cdc.connectors.jdbc.sink.JdbcDataSink;
+import org.apache.flink.configuration.ConfigurationUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.ServiceConfigurationError;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECTION_POOL_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_MAX_RETRIES;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONNECT_TIMEOUT;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.CONN_URL;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.DRIVER_CLASS_NAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.JDBC_PROPERTIES_PROP_PREFIX;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.PASSWORD;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.SERVER_TIME_ZONE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.USERNAME;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_INTERVAL_MS;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_BATCH_SIZE;
+import static org.apache.flink.cdc.connectors.jdbc.options.JdbcSinkOptions.WRITE_MAX_RETRIES;
+
+/** A {@link DataSinkFactory} for creating JDBC sinks. */
+public class JdbcDataSinkFactory implements DataSinkFactory {
+
+    public static final String IDENTIFIER = "jdbc";
+
+    private static final Logger LOG = LoggerFactory.getLogger(JdbcDataSinkFactory.class);
+
+    @Override
+    public DataSink createDataSink(Context context) {
+        FactoryHelper.createFactoryHelper(this, context)
+                .validateExcept(JDBC_PROPERTIES_PROP_PREFIX);
+
+        // Construct JdbcSinkConfig from FactoryConfigurations
+        final Configuration config = context.getFactoryConfiguration();
+        JdbcSinkConfig.Builder<?> builder = new JdbcSinkConfig.Builder<>();
+
+        List<JdbcSinkDialectFactory<JdbcSinkConfig>> dialectFactories =
+                discoverDialectFactories(getClass().getClassLoader());
+        config.getOptional(CONN_URL).ifPresent(builder::connUrl);
+        config.getOptional(USERNAME).ifPresent(builder::username);
+        config.getOptional(PASSWORD).ifPresent(builder::password);
+
+        builder.serverTimeZone(config.getOptional(SERVER_TIME_ZONE).orElse("UTC"));
+        builder.connectTimeout(config.get(CONNECT_TIMEOUT));
+        builder.connectionPoolSize(config.get(CONNECTION_POOL_SIZE));
+        builder.connectMaxRetries(config.get(CONNECT_MAX_RETRIES));
+        builder.writeBatchIntervalMs(config.get(WRITE_BATCH_INTERVAL_MS));
+        builder.writeBatchSize(config.get(WRITE_BATCH_SIZE));
+        builder.writeMaxRetries(config.get(WRITE_MAX_RETRIES));
+        builder.driverClassName(config.get(DRIVER_CLASS_NAME));
+
+        Properties properties = new Properties();
+        Map<String, String> jdbcProperties =
+                JdbcSinkOptions.getPropertiesByPrefix(config, JDBC_PROPERTIES_PROP_PREFIX);
+        properties.putAll(jdbcProperties);
+        builder.jdbcProperties(properties);
+        JdbcSinkConfig jdbcSinkConfig = builder.build();
+
+        // Discover corresponding factory
+        String dialect = jdbcSinkConfig.getDialect();
+        JdbcSinkDialectFactory<JdbcSinkConfig> dialectFactory =

Review Comment:
   Please check the number of discovered factories. There must be exactly one factory for the dialect; otherwise we should throw an exception.
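   For example (sketch; `identifier()` is assumed to be the factory's dialect-name accessor):
   ```java
   // Fail fast when zero or multiple dialect factories match the configured dialect.
   List<JdbcSinkDialectFactory<JdbcSinkConfig>> matching =
           dialectFactories.stream()
                   .filter(factory -> factory.identifier().equals(dialect)) // assumed accessor
                   .collect(Collectors.toList());
   if (matching.isEmpty()) {
       throw new IllegalStateException(
               "No JdbcSinkDialectFactory found for dialect '" + dialect + "'.");
   }
   if (matching.size() > 1) {
       throw new IllegalStateException(
               "Ambiguous dialect '" + dialect + "': multiple factories found.");
   }
   JdbcSinkDialectFactory<JdbcSinkConfig> dialectFactory = matching.get(0);
   ```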



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
