This is an automated email from the ASF dual-hosted git repository.

xuanwo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-opendal.git


The following commit(s) were added to refs/heads/main by this push:
     new 421df7b8d fix(services/s3): Fix s3 batch max operations (#2418)
421df7b8d is described below

commit 421df7b8dd4cc2d7dac0294b073e53c0c68af478
Author: A-Stupid-Sun <[email protected]>
AuthorDate: Mon Jun 5 19:15:40 2023 +0800

    fix(services/s3): Fix s3 batch max operations (#2418)
---
 core/src/services/s3/backend.rs | 17 ++++++++++++++---
 core/src/services/s3/core.rs    |  1 +
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/core/src/services/s3/backend.rs b/core/src/services/s3/backend.rs
index 99ec8bfab..be720a8f9 100644
--- a/core/src/services/s3/backend.rs
+++ b/core/src/services/s3/backend.rs
@@ -55,7 +55,7 @@ static ENDPOINT_TEMPLATES: Lazy<HashMap<&'static str, &'static str>> = Lazy::new
 });
 
 const DEFAULT_WRITE_MIN_SIZE: usize = 8 * 1024 * 1024;
-
+const DEFAULT_BATCH_MAX_OPERATIONS: usize = 1000;
 /// Aws S3 and compatible services (including minio, digitalocean space, Tencent Cloud Object Storage(COS) and so on) support.
 /// For more information about s3-compatible services, refer to [Compatible Services](#compatible-services).
 ///
@@ -93,6 +93,8 @@ pub struct S3Builder {
     /// the part size of s3 multipart upload, which should be 5 MiB to 5 GiB.
     /// There is no minimum size limit on the last part of your multipart upload
     write_min_size: Option<usize>,
+    /// batch_max_operations
+    batch_max_operations: Option<usize>,
 }
 
 impl Debug for S3Builder {
@@ -508,6 +510,12 @@ impl S3Builder {
     pub fn write_min_size(&mut self, write_min_size: usize) -> &mut Self {
         self.write_min_size = Some(write_min_size);
 
+        self
+    }
+    /// Set maximum batch operations of this backend.
+    pub fn batch_max_operations(&mut self, batch_max_operations: usize) -> &mut Self {
+        self.batch_max_operations = Some(batch_max_operations);
+
         self
     }
 }
@@ -696,7 +704,9 @@ impl Builder for S3Builder {
             )
             .with_context("service", Scheme::S3));
         }
-
+        let batch_max_operations = self
+            .batch_max_operations
+            .unwrap_or(DEFAULT_BATCH_MAX_OPERATIONS);
         debug!("backend build finished");
         Ok(S3Backend {
             core: Arc::new(S3Core {
@@ -714,6 +724,7 @@ impl Builder for S3Builder {
                 loader,
                 client,
                 write_min_size,
+                batch_max_operations,
             }),
         })
     }
@@ -773,7 +784,7 @@ impl Accessor for S3Backend {
                 presign_write: true,
 
                 batch: true,
-                batch_max_operations: Some(1000),
+                batch_max_operations: Some(self.core.batch_max_operations),
 
                 ..Default::default()
             });
diff --git a/core/src/services/s3/core.rs b/core/src/services/s3/core.rs
index bc38fe194..c802fd430 100644
--- a/core/src/services/s3/core.rs
+++ b/core/src/services/s3/core.rs
@@ -82,6 +82,7 @@ pub struct S3Core {
     pub loader: AwsLoader,
     pub client: HttpClient,
     pub write_min_size: usize,
+    pub batch_max_operations: usize,
 }
 
 impl Debug for S3Core {

Reply via email to