This is an automated email from the ASF dual-hosted git repository.

laiyingchun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git


The following commit(s) were added to refs/heads/master by this push:
     new 7f5039456 fix(backup): Fix the stack overflow when read large sst file 
(#2059)
7f5039456 is described below

commit 7f50394565ab5e1891bdcb44b4940f7fc7f7ae31
Author: Yingchun Lai <[email protected]>
AuthorDate: Thu Jul 4 15:44:47 2024 +0800

    fix(backup): Fix the stack overflow when read large sst file (#2059)
    
    After refactoring to use RocksDB APIs to read files from the local
    filesystem, a stack overflow may occur when the file to be read
    is larger than the stack size (say, 8 MB).
    
    This patch changes the code to store the file content on the
    heap instead of the stack.
---
 src/block_service/hdfs/hdfs_service.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/block_service/hdfs/hdfs_service.cpp 
b/src/block_service/hdfs/hdfs_service.cpp
index e30371049..3defe8835 100644
--- a/src/block_service/hdfs/hdfs_service.cpp
+++ b/src/block_service/hdfs/hdfs_service.cpp
@@ -41,6 +41,7 @@
 #include "utils/fmt_logging.h"
 #include "utils/safe_strerror_posix.h"
 #include "utils/strings.h"
+#include "utils/utils.h"
 
 DSN_DEFINE_uint64(replication,
                   hdfs_read_batch_size_bytes,
@@ -435,8 +436,8 @@ dsn::task_ptr hdfs_file_object::upload(const upload_request 
&req,
             }
 
             rocksdb::Slice result;
-            char scratch[file_size];
-            s = rfile->Read(file_size, &result, scratch);
+            auto scratch = dsn::utils::make_shared_array<char>(file_size);
+            s = rfile->Read(file_size, &result, scratch.get());
             if (!s.ok()) {
                 LOG_ERROR(
                     "read local file '{}' failed, err = {}", 
req.input_local_name, s.ToString());


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to