This is an automated email from the ASF dual-hosted git repository.

wuchunfu pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-seatunnel.git


The following commit(s) were added to refs/heads/dev by this push:
     new 4710918b0 [Connector&Engine]Set S3 AK to optional (#3688)
4710918b0 is described below

commit 4710918b027aedf427a7867e9b559f1da89c80d2
Author: Kirs <[email protected]>
AuthorDate: Fri Dec 9 21:59:40 2022 +0800

    [Connector&Engine]Set S3 AK to optional (#3688)
    
    * [Connector&Engine]Support other CredentialsProvider
    
    IAMInstanceCredentialsProvider doesn't need an AK; it uses the IAM
    credentials of any EC2 VM or AWS container in which the process is
    running.
    
    * update change log
    
    * fix check style
---
 docs/en/connector-v2/sink/S3File.md                           | 11 ++++++-----
 docs/en/connector-v2/source/S3File.md                         |  9 +++++----
 .../seatunnel/connectors/seatunnel/file/s3/config/S3Conf.java |  4 ++++
 .../connectors/seatunnel/file/s3/sink/S3FileSinkFactory.java  |  4 ++--
 .../checkpoint/storage/hdfs/common/S3Configuration.java       |  8 ++------
 5 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/docs/en/connector-v2/sink/S3File.md 
b/docs/en/connector-v2/sink/S3File.md
index 444d0f672..d49a2a33d 100644
--- a/docs/en/connector-v2/sink/S3File.md
+++ b/docs/en/connector-v2/sink/S3File.md
@@ -30,8 +30,8 @@ By default, we use 2PC commit to ensure `exactly-once`
 
|----------------------------------|---------|----------|-----------------------------------------------------------|
 | path                             | string  | yes      | -                    
                                     |
 | bucket                           | string  | yes      | -                    
                                     |
-| access_key                       | string  | yes      | -                    
                                     |
-| access_secret                    | string  | yes      | -                    
                                     |
+| access_key                       | string  | no       | -                    
                                     |
+| access_secret                    | string  | no       | -                    
                                     |
 | hadoop_s3_properties             | map     | no       | -                    
                                     |
 | file_name_expression             | string  | no       | "${transactionId}"   
                                     |
 | file_format                      | string  | no       | "text"               
                                     |
@@ -56,11 +56,11 @@ The bucket address of s3 file system, for example: 
`s3n://seatunnel-test`, if yo
 
 ### access_key [string]
 
-The access key of s3 file system.
+The access key of s3 file system. If this parameter is not set, please confirm 
that the credential provider chain can be authenticated correctly, you could 
check this 
[hadoop-aws](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)
 
 ### access_secret [string]
 
-The access secret of s3 file system.
+The access secret of s3 file system. If this parameter is not set, please 
confirm that the credential provider chain can be authenticated correctly, you 
could check this 
[hadoop-aws](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)
 
 ### hadoop_s3_properties [map]
 
@@ -235,4 +235,5 @@ For orc file format
   - Allow user to add additional hadoop-s3 parameters
   - Allow the use of the s3a protocol
   - Decouple hadoop-aws dependencies
-- [Improve] Support setting batch size for every file 
([3625](https://github.com/apache/incubator-seatunnel/pull/3625))
\ No newline at end of file
+- [Improve] Support setting batch size for every file 
([3625](https://github.com/apache/incubator-seatunnel/pull/3625))
+- [Feature]Set S3 AK to optional 
([3688](https://github.com/apache/incubator-seatunnel/pull/3688))
\ No newline at end of file
diff --git a/docs/en/connector-v2/source/S3File.md 
b/docs/en/connector-v2/source/S3File.md
index 5286119bb..4b35d9692 100644
--- a/docs/en/connector-v2/source/S3File.md
+++ b/docs/en/connector-v2/source/S3File.md
@@ -35,8 +35,8 @@ Read all the data in a split in a pollNext call. What splits 
are read will be sa
 | path                      | string  | yes      | -                   |
 | type                      | string  | yes      | -                   |
 | bucket                    | string  | yes      | -                   |
-| access_key                | string  | yes      | -                   |
-| access_secret             | string  | yes      | -                   |
+| access_key                | string  | no       | -                   |
+| access_secret             | string  | no       | -                   |
 | hadoop_s3_properties      | map     | no       | -                   |
 | delimiter                 | string  | no       | \001                |
 | parse_partition_from_path | boolean | no       | true                |
@@ -188,11 +188,11 @@ The bucket address of s3 file system, for example: 
`s3n://seatunnel-test`, if yo
 
 ### access_key [string]
 
-The access key of s3 file system.
+The access key of s3 file system. If this parameter is not set, please confirm 
that the credential provider chain can be authenticated correctly, you could 
check this 
[hadoop-aws](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)
 
 ### access_secret [string]
 
-The access secret of s3 file system.
+The access secret of s3 file system. If this parameter is not set, please 
confirm that the credential provider chain can be authenticated correctly, you 
could check this 
[hadoop-aws](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)
 
 ### hadoop_s3_properties [map]
 
@@ -262,3 +262,4 @@ Source plugin common parameters, please refer to [Source 
Common Options](common-
   - Allow user to add additional hadoop-s3 parameters
   - Allow the use of the s3a protocol
   - Decouple hadoop-aws dependencies
+- [Feature]Set S3 AK to optional 
([3688](https://github.com/apache/incubator-seatunnel/pull/3688))
diff --git 
a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/config/S3Conf.java
 
b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/config/S3Conf.java
index 46cabf49b..e4602d899 100644
--- 
a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/config/S3Conf.java
+++ 
b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/config/S3Conf.java
@@ -72,6 +72,9 @@ public class S3Conf extends HadoopConf {
     }
 
     private static void putS3SK(Map<String, String> s3Options, Config config) {
+        if (!CheckConfigUtil.isValidParam(config, 
S3Config.S3_ACCESS_KEY.key()) && CheckConfigUtil.isValidParam(config, 
S3Config.S3_SECRET_KEY.key())) {
+            return;
+        }
         String accessKey = config.getString(S3Config.S3_ACCESS_KEY.key());
         String secretKey = config.getString(S3Config.S3_SECRET_KEY.key());
         if (S3A_SCHEMA.equals(SCHEMA)) {
@@ -83,4 +86,5 @@ public class S3Conf extends HadoopConf {
         s3Options.put("fs.s3n.awsAccessKeyId", accessKey);
         s3Options.put("fs.s3n.awsSecretAccessKey", secretKey);
     }
+
 }
diff --git 
a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSinkFactory.java
 
b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSinkFactory.java
index 068d3aecb..c86135c53 100644
--- 
a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSinkFactory.java
+++ 
b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSinkFactory.java
@@ -38,8 +38,8 @@ public class S3FileSinkFactory implements TableSinkFactory {
         return OptionRule.builder()
                 .required(S3Config.FILE_PATH)
                 .required(S3Config.S3_BUCKET)
-                .required(S3Config.S3_ACCESS_KEY)
-                .required(S3Config.S3_SECRET_KEY)
+                .optional(S3Config.S3_ACCESS_KEY)
+                .optional(S3Config.S3_SECRET_KEY)
                 .optional(BaseSinkConfig.FILE_NAME_EXPRESSION)
                 .optional(BaseSinkConfig.FILE_FORMAT)
                 .optional(BaseSinkConfig.FILENAME_TIME_FORMAT)
diff --git 
a/seatunnel-engine/seatunnel-engine-storage/checkpoint-storage-plugins/checkpoint-storage-hdfs/src/main/java/org/apache/seatunnel/engine/checkpoint/storage/hdfs/common/S3Configuration.java
 
b/seatunnel-engine/seatunnel-engine-storage/checkpoint-storage-plugins/checkpoint-storage-hdfs/src/main/java/org/apache/seatunnel/engine/checkpoint/storage/hdfs/common/S3Configuration.java
index 868a0d193..ca0b79260 100644
--- 
a/seatunnel-engine/seatunnel-engine-storage/checkpoint-storage-plugins/checkpoint-storage-hdfs/src/main/java/org/apache/seatunnel/engine/checkpoint/storage/hdfs/common/S3Configuration.java
+++ 
b/seatunnel-engine/seatunnel-engine-storage/checkpoint-storage-plugins/checkpoint-storage-hdfs/src/main/java/org/apache/seatunnel/engine/checkpoint/storage/hdfs/common/S3Configuration.java
@@ -31,7 +31,7 @@ import java.util.Map;
  * we just support s3n and s3a protocol.
  * some hadoop low version not support s3a, if you want to use s3a, you should 
check your hadoop version first.
  * <p>
- * access, secret and bucket is required, and the default schema is s3n
+ * bucket is required, and the default schema is s3n
  * we used the bucket name to get the protocol,if you used s3a, this bucket 
name must be s3a://bucket, if you used s3n, this bucket name must be 
s3n://bucket
  * <p>
  * other configuration is optional, if you need to set other configuration, 
you can set it in the config
@@ -46,8 +46,6 @@ import java.util.Map;
 public class S3Configuration extends AbstractConfiguration {
 
     /**************** S3 required keys ***************/
-    public static final String S3_ACCESS_KEY = "access.key";
-    public static final String S3_SECRET_KEY = "secret.key";
     public static final String S3_BUCKET_KEY = "s3.bucket";
 
 
@@ -62,7 +60,7 @@ public class S3Configuration extends AbstractConfiguration {
 
     @Override
     public Configuration buildConfiguration(Map<String, String> config) {
-        checkConfiguration(config, S3_ACCESS_KEY, S3_SECRET_KEY, 
S3_BUCKET_KEY);
+        checkConfiguration(config, S3_BUCKET_KEY);
         String protocol = DEFAULT_PROTOCOL;
         if (config.get(S3_BUCKET_KEY).startsWith(S3A_PROTOCOL)) {
             protocol = S3A_PROTOCOL;
@@ -70,8 +68,6 @@ public class S3Configuration extends AbstractConfiguration {
         String fsImpl = protocol.equals(S3A_PROTOCOL) ? HDFS_S3A_IMPL : 
HDFS_S3N_IMPL;
         Configuration hadoopConf = new Configuration();
         hadoopConf.set(FS_DEFAULT_NAME_KEY, config.get(S3_BUCKET_KEY));
-        hadoopConf.set(formatKey(protocol, S3_ACCESS_KEY), 
config.get(S3_ACCESS_KEY));
-        hadoopConf.set(formatKey(protocol, S3_SECRET_KEY), 
config.get(S3_SECRET_KEY));
         hadoopConf.set(formatKey(protocol, HDFS_IMPL_KEY), fsImpl);
         setExtraConfiguration(hadoopConf, config, FS_KEY + protocol + 
SPLIT_CHAR);
         return hadoopConf;

Reply via email to