github-actions[bot] commented on code in PR #17586:
URL: https://github.com/apache/doris/pull/17586#discussion_r1138974473


##########
be/src/io/fs/hdfs_file_system.h:
##########
@@ -81,49 +81,43 @@ class HdfsFileSystemHandle {
 
 class HdfsFileSystem final : public RemoteFileSystem {
 public:
-    static std::shared_ptr<HdfsFileSystem> create(const THdfsParams& 
hdfs_params,
-                                                  const std::string& path);
+    static Status create(const THdfsParams& hdfs_params, const std::string& 
path,
+                         std::shared_ptr<HdfsFileSystem>* fs);
 
     ~HdfsFileSystem() override;
 
-    Status create_file(const Path& path, FileWriterPtr* writer) override;
-
-    Status open_file(const Path& path, FileReaderSPtr* reader, IOContext* 
io_ctx) override;
-
-    Status delete_file(const Path& path) override;
-
-    Status create_directory(const Path& path) override;
-
-    // Delete all files under path.
-    Status delete_directory(const Path& path) override;
-
-    Status link_file(const Path& /*src*/, const Path& /*dest*/) override {
-        return Status::NotSupported("Not supported");
-    }
-
-    Status exists(const Path& path, bool* res) const override;
-
-    Status file_size(const Path& path, size_t* file_size) const override;
-
-    Status list(const Path& path, std::vector<Path>* files) override;
-
-    Status upload(const Path& /*local_path*/, const Path& /*dest_path*/) 
override {
-        return Status::NotSupported("Currently not support to upload file to 
HDFS");
-    }
-
-    Status batch_upload(const std::vector<Path>& /*local_paths*/,
-                        const std::vector<Path>& /*dest_paths*/) override {
-        return Status::NotSupported("Currently not support to batch upload 
file to HDFS");
-    }
+    HdfsFileSystemHandle* get_handle();
 
-    Status connect() override;
+protected:
+    Status connect_impl() override;
+    Status create_file_impl(const Path& file, FileWriterPtr* writer) override;
+    Status open_file_internal(const Path& file, FileReaderSPtr* reader) 
override;
+    Status create_directory_impl(const Path& dir) override;
+    Status delete_file_impl(const Path& file) override;
+    Status delete_directory_impl(const Path& dir) override;
+    Status batch_delete_impl(const std::vector<Path>& files) override;
+    Status exists_impl(const Path& path, bool* res) const override;
+    Status file_size_impl(const Path& file, size_t* file_size) const override;
+    Status list_impl(const Path& dir, bool only_file, std::vector<FileInfo>* 
files,
+                     bool* exists) override;
+    Status rename_impl(const Path& orig_name, const Path& new_name) override;
+    Status rename_dir_impl(const Path& orig_name, const Path& new_name) 
override;
+
+    Status upload_impl(const Path& local_file, const Path& remote_file) 
override;
+    Status batch_upload_impl(const std::vector<Path>& local_files,
+                             const std::vector<Path>& remote_files) override;
+    Status direct_upload_impl(const Path& remote_file, const std::string& 
content) override;
+    Status upload_with_checksum_impl(const Path& local_file, const Path& 
remote_file,
+                                     const std::string& checksum) override;
+    Status download_impl(const Path& remote_file, const Path& local_file) 
override;
+    Status direct_download_impl(const Path& remote_file, std::string* content) 
override;
 
-    HdfsFileSystemHandle* get_handle();
+private:
+    Status delete_internal(const Path& path, int is_recursive);
 
 private:

Review Comment:
   warning: redundant access specifier has the same accessibility as the 
previous access specifier [readability-redundant-access-specifiers]
   
   ```suggestion
   
   ```
   **be/src/io/fs/hdfs_file_system.h:114:** previously declared here
   ```cpp
   private:
   ^
   ```
   



##########
be/test/io/fs/file_system_test.cpp:
##########
@@ -0,0 +1,643 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <gtest/gtest.h>
+
+#include "io/fs/broker_file_system.h"
+#include "io/fs/file_reader.h"
+#include "io/fs/file_writer.h"
+#include "io/fs/hdfs_file_system.h"
+#include "io/fs/local_file_system.h"
+#include "io/fs/s3_file_system.h"
+#include "io/hdfs_builder.h"
+#include "util/s3_uri.h"
+
+namespace doris {
+
+#ifndef CHECK_STATUS_OK
+#define CHECK_STATUS_OK(stmt)                   \
+    do {                                        \
+        Status _status_ = (stmt);               \
+        ASSERT_TRUE(_status_.ok()) << _status_; \
+    } while (false)
+#endif
+
+// set your own info
+// s3
+static std::string ak = "";

Review Comment:
   warning: redundant string initialization [readability-redundant-string-init]
   
   ```suggestion
   static std::string ak;
   ```
   



##########
be/test/io/fs/file_system_test.cpp:
##########
@@ -0,0 +1,643 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <gtest/gtest.h>
+
+#include "io/fs/broker_file_system.h"
+#include "io/fs/file_reader.h"
+#include "io/fs/file_writer.h"
+#include "io/fs/hdfs_file_system.h"
+#include "io/fs/local_file_system.h"
+#include "io/fs/s3_file_system.h"
+#include "io/hdfs_builder.h"
+#include "util/s3_uri.h"
+
+namespace doris {
+
+#ifndef CHECK_STATUS_OK
+#define CHECK_STATUS_OK(stmt)                   \
+    do {                                        \
+        Status _status_ = (stmt);               \
+        ASSERT_TRUE(_status_.ok()) << _status_; \
+    } while (false)
+#endif
+
+// set your own info
+// s3
+static std::string ak = "";
+static std::string sk = "";
+static std::string endpoint = "http://cos.ap-beijing.myqcloud.com";
+static std::string region = "ap-beijing";
+static std::string s3_location = "";
+
+// hdfs
+static std::string fs_name = "hdfs://my_nameservice";
+static std::string username = "hadoop";
+static std::string nameservices = "my_nameservice";
+static std::string nn = "nn1,nn2";
+static std::string rpc1 = "172.21.0.1:4007";
+static std::string rpc2 = "172.21.0.2:4007";
+static std::string provider =
+        
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+static std::string hdfs_location = "/user/doris/";
+
+// broker
+static std::string broker_ip = "127.0.0.1";
+static int broker_port = 8008;
+static std::string broker_location = "hdfs://my_nameservice/user/doris";
+
+// comment out to enable the specified test
+#define TestHdfsFileSystem DISABLED_TestHdfsFileSystem
+#define TestS3FileSystem DISABLED_TestS3FileSystem
+#define TestBrokerFileSystem DISABLED_TestBrokerFileSystem
+
+class FileSystemTest : public testing::Test {
+public:
+    virtual void SetUp() {
+        s3_prop.emplace("AWS_ACCESS_KEY", ak);
+        s3_prop.emplace("AWS_SECRET_KEY", sk);
+        s3_prop.emplace("AWS_ENDPOINT", endpoint);
+        s3_prop.emplace("AWS_REGION", region);
+
+        hdfs_prop.emplace("fs.defaultFS", fs_name);
+        hdfs_prop.emplace("hadoop.username", username);
+        hdfs_prop.emplace("username", username); // for broker hdfs
+        hdfs_prop.emplace("dfs.nameservices", nameservices);
+        hdfs_prop.emplace("dfs.ha.namenodes.my_nameservice", nn);
+        hdfs_prop.emplace("dfs.namenode.rpc-address.my_nameservice.nn1", rpc1);
+        hdfs_prop.emplace("dfs.namenode.rpc-address.my_nameservice.nn2", rpc2);
+        hdfs_prop.emplace("dfs.client.failover.proxy.provider.my_nameservice", 
provider);
+
+        broker_addr.__set_hostname(broker_ip);
+        broker_addr.__set_port(broker_port);
+    }
+
+    virtual void TearDown() {}
+
+private:
+    std::map<std::string, std::string> s3_prop;
+    std::map<std::string, std::string> hdfs_prop;
+    TNetworkAddress broker_addr;
+};
+
+TEST_F(FileSystemTest, TestBrokerFileSystem) {
+    std::shared_ptr<io::BrokerFileSystem> fs;
+    CHECK_STATUS_OK(io::BrokerFileSystem::create(broker_addr, hdfs_prop, 0, 
&fs));

Review Comment:
   warning: 'hdfs_prop' is a private member of 'doris::FileSystemTest' 
[clang-diagnostic-error]
   ```cpp
       CHECK_STATUS_OK(io::BrokerFileSystem::create(broker_addr, hdfs_prop, 0, 
&fs));
                                                                 ^
   ```
   **be/test/io/fs/file_system_test.cpp:92:** declared private here
   ```cpp
       std::map<std::string, std::string> hdfs_prop;
                                          ^
   ```
   



##########
be/test/olap/remote_rowset_gc_test.cpp:
##########
@@ -53,8 +53,10 @@
         s3_conf.region = config::test_s3_region;
         s3_conf.bucket = config::test_s3_bucket;

Review Comment:
   warning: no member named 'test_s3_bucket' in namespace 'doris::config' 
[clang-diagnostic-error]
   ```cpp
           s3_conf.bucket = config::test_s3_bucket;
                                    ^
   ```
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to