This is an automated email from the ASF dual-hosted git repository.

wanghailin pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/incubator-seatunnel.git


The following commit(s) were added to refs/heads/dev by this push:
     new f1a1dfe4a [Hotfix][Connector-V2][Hive] Fix hive unknownhost (#4141)
f1a1dfe4a is described below

commit f1a1dfe4afff2a4f73001d15dee239f15c29bd55
Author: Tyrantlucifer <[email protected]>
AuthorDate: Thu Feb 16 14:15:10 2023 +0800

    [Hotfix][Connector-V2][Hive] Fix hive unknownhost (#4141)
    
    * [Hotfix][Connector-V2][Hive] Fix the bug that can not load hdfs-site.xml
---
 docs/en/connector-v2/sink/Hive.md                              |  5 +++++
 docs/en/connector-v2/source/Hive.md                            |  5 +++++
 .../seatunnel/connectors/seatunnel/hive/sink/HiveSink.java     | 10 ++++++++--
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/docs/en/connector-v2/sink/Hive.md b/docs/en/connector-v2/sink/Hive.md
index 5a0bd8686..a6abe4abb 100644
--- a/docs/en/connector-v2/sink/Hive.md
+++ b/docs/en/connector-v2/sink/Hive.md
@@ -35,6 +35,7 @@ By default, we use 2PC commit to ensure `exactly-once`
 | table_name           | string | yes      | -             |
 | metastore_uri        | string | yes      | -             |
 | compress_codec       | string | no       | none          |
+| hdfs_site_path       | string | no       | -             |
 | kerberos_principal   | string | no       | -             |
 | kerberos_keytab_path | string | no       | -             |
 | common-options       |        | no       | -             |
@@ -47,6 +48,10 @@ Target Hive table name eg: db1.table1
 
 Hive metastore uri
 
+### hdfs_site_path [string]
+
+The path of `hdfs-site.xml`, used to load ha configuration of namenodes
+
 ### kerberos_principal [string]
 
 The principal of kerberos
diff --git a/docs/en/connector-v2/source/Hive.md b/docs/en/connector-v2/source/Hive.md
index 878a3c0a7..fc0023539 100644
--- a/docs/en/connector-v2/source/Hive.md
+++ b/docs/en/connector-v2/source/Hive.md
@@ -39,6 +39,7 @@ Read all the data in a split in a pollNext call. What splits are read will be sa
 | metastore_uri        | string | yes      | -             |
 | kerberos_principal   | string | no       | -             |
 | kerberos_keytab_path | string | no       | -             |
+| hdfs_site_path       | string | no       | -             |
 | read_partitions      | list   | no       | -             |
 | common-options       |        | no       | -             |
 
@@ -50,6 +51,10 @@ Target Hive table name eg: db1.table1
 
 Hive metastore uri
 
+### hdfs_site_path [string]
+
+The path of `hdfs-site.xml`, used to load ha configuration of namenodes
+
 ### read_partitions [list]
 
 The target partitions that user want to read from hive table, if user does not set this parameter, it will read all the data from hive table.
diff --git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java
index e00aca5ac..31a6a8a04 100644
--- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java
+++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/sink/HiveSink.java
@@ -53,6 +53,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig.FIELD_DELIMITER;
 import static org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig.FILE_FORMAT;
 import static org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig.FILE_NAME_EXPRESSION;
@@ -157,9 +158,13 @@ public class HiveSink extends BaseHdfsFileSink {
         try {
             URI uri = new URI(hdfsLocation);
             String path = uri.getPath();
-            pluginConfig =
-                    pluginConfig.withValue(FILE_PATH.key(), ConfigValueFactory.fromAnyRef(path));
             hadoopConf = new HadoopConf(hdfsLocation.replace(path, ""));
+            pluginConfig =
+                    pluginConfig
+                            .withValue(FILE_PATH.key(), ConfigValueFactory.fromAnyRef(path))
+                            .withValue(
+                                    FS_DEFAULT_NAME_KEY,
+                                    ConfigValueFactory.fromAnyRef(hadoopConf.getHdfsNameKey()));
         } catch (URISyntaxException e) {
             String errorMsg =
                     String.format(
@@ -170,6 +175,7 @@ public class HiveSink extends BaseHdfsFileSink {
                     HiveConnectorErrorCode.GET_HDFS_NAMENODE_HOST_FAILED, errorMsg, e);
         }
         this.pluginConfig = pluginConfig;
+        super.prepare(pluginConfig);
     }
 
     @Override

Reply via email to