This is an automated email from the ASF dual-hosted git repository.

liaoxin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 32fd98ea66d [Fix](regression-test) fix low WAL disk test case failures (#60408)
32fd98ea66d is described below

commit 32fd98ea66ddb8119664f27270510c6a6625131f
Author: heguanhui <[email protected]>
AuthorDate: Mon Feb 9 11:21:36 2026 +0800

    [Fix](regression-test) fix low WAL disk test case failures (#60408)

    Fix test case failures in the
    test_low_wal_disk_space_fault_injection.groovy suite:
    1. Remove a BE debug point that no longer exists from this suite.
    2. Correctly add a group-commit load in async mode and verify it
       successfully downgrades when WAL disk space is insufficient (see the
       sketch below).
    3. Correctly add a group-commit load and verify it is intercepted due to
       insufficient WAL disk space when transitioning from async mode to
       sync mode.
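
    For context, points 2 and 3 are exercised by toggling BE debug points
    around a group-commit stream load. The following is a minimal Groovy
    sketch of that pattern only, not part of the commit; it reuses the
    GetDebugPoint() and streamLoad helpers, the debug point name, and the
    table/file names that appear in the suite diff below:

        // enable the fault injection on all BEs, then always clean it up
        GetDebugPoint().enableDebugPointForAllBEs("LoadBlockQueue.has_enough_wal_disk_space.low_space")
        try {
            sql """ set group_commit = async_mode; """
            streamLoad {
                table "wal_test"
                set 'group_commit', 'async_mode'
                file 'test_low_wal_disk_space_fault_injection.csv.gz'
                // with the debug point active, the load is expected to
                // downgrade from async mode to sync mode and still succeed
            }
        } finally {
            // disable the debug point so later suites are unaffected
            GetDebugPoint().disableDebugPointForAllBEs("LoadBlockQueue.has_enough_wal_disk_space.low_space")
        }
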
---
 be/src/http/utils.cpp                              |   1 +
 .../test_low_wal_disk_space_fault_injection.groovy | 115 ++++++++++++++-------
 2 files changed, 78 insertions(+), 38 deletions(-)

diff --git a/be/src/http/utils.cpp b/be/src/http/utils.cpp
index 79552c497b9..63953d8f7c9 100644
--- a/be/src/http/utils.cpp
+++ b/be/src/http/utils.cpp
@@ -232,6 +232,7 @@ void do_dir_response(const std::string& dir_path, HttpRequest* req, bool is_acqu
 }
 
 bool load_size_smaller_than_wal_limit(int64_t content_length) {
+    DBUG_EXECUTE_IF("StreamLoad.load_size_smaller_than_wal_limit.always_false", { return false; });
     // 1. req->header(HttpHeaders::CONTENT_LENGTH) will return streamload content length. If it is empty or equals to 0, it means this streamload
     // is a chunked streamload and we are not sure its size.
     // 2. if streamload content length is too large, like larger than 80% of the WAL constrain.
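
For context (an illustrative sketch, not part of the patch): the debug point added
above lets a regression test force load_size_smaller_than_wal_limit() to report
insufficient space, so a group-commit stream load is rejected at the HTTP layer
regardless of the real WAL limit. A minimal Groovy sketch using the framework
helpers visible in the suite below:

    GetDebugPoint().enableDebugPointForAllBEs("StreamLoad.load_size_smaller_than_wal_limit.always_false")
    try {
        streamLoad {
            table "wal_test"
            set 'group_commit', 'async_mode'
            file 'test_low_wal_disk_space_fault_injection.csv.gz'
        }
        assertTrue(false, "the load should have been rejected for lack of WAL space")
    } catch (Exception e) {
        // the rejection surfaces as an exception mentioning WAL space
        logger.info("rejected as expected: " + e.getMessage())
    } finally {
        GetDebugPoint().disableDebugPointForAllBEs("StreamLoad.load_size_smaller_than_wal_limit.always_false")
    }
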
diff --git a/regression-test/suites/fault_injection_p0/test_low_wal_disk_space_fault_injection.groovy b/regression-test/suites/fault_injection_p0/test_low_wal_disk_space_fault_injection.groovy
index 0b7cab481f7..ab5fbc82cdc 100644
--- a/regression-test/suites/fault_injection_p0/test_low_wal_disk_space_fault_injection.groovy
+++ b/regression-test/suites/fault_injection_p0/test_low_wal_disk_space_fault_injection.groovy
@@ -16,51 +16,90 @@
 // under the License.
 
 suite("test_low_wal_disk_space_fault_injection","nonConcurrent") {
-
-
     def tableName = "wal_test"
     sql """ DROP TABLE IF EXISTS ${tableName} """
-
-    sql """
-        CREATE TABLE IF NOT EXISTS ${tableName} (
-            `k` int ,
-            `v` int ,
-        ) engine=olap
-        UNIQUE KEY(k)
-        DISTRIBUTED BY HASH(`k`) 
-        BUCKETS 32 
-        properties("replication_num" = "1")
+    sql """    
+        CREATE TABLE IF NOT EXISTS ${tableName} (    
+            `k` int ,    
+            `v` int ,    
+        ) engine=olap    
+        UNIQUE KEY(k)    
+        DISTRIBUTED BY HASH(`k`)     
+        BUCKETS 32     
+        properties("replication_num" = "1")    
         """
-
     GetDebugPoint().clearDebugPointsForAllBEs()
-
     sql """ set group_commit = async_mode; """
-        try {
-            GetDebugPoint().enableDebugPointForAllBEs("GroupCommitBlockSink._add_blocks.return_sync_mode")
-                def t1 = []
-                for (int i = 0; i < 20; i++) {
-                    t1.add(Thread.startDaemon {
-                        streamLoad {
-                            table "${tableName}1"
 
-                            set 'column_separator', ','
-                            set 'compress_type', 'GZ'
-                            set 'format', 'csv'
-                            set 'group_commit', 'async_mode'
-                            unset 'label'
-
-                            file 'test_low_wal_disk_space_fault_injection.csv.gz'
-                            time 600000 
+    // case 1: Simulate WAL space shortage at Debug Point to test downgrade mechanism
+    logger.info("=== case 1: Simulate WAL space shortage at Debug Point to test downgrade mechanism ===")
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("LoadBlockQueue.has_enough_wal_disk_space.low_space")
+        def results = Collections.synchronizedList([])
+        def threads = []
+        for (int i = 0; i < 5; i++) {
+            threads.add(Thread.startDaemon("stream-load-$i") {
+                streamLoad {
+                    table "${tableName}"
+                    set 'column_separator', ','
+                    set 'compress_type', 'GZ'
+                    set 'format', 'csv'
+                    set 'group_commit', 'async_mode'
+                    unset 'label'
+                    file 'test_low_wal_disk_space_fault_injection.csv.gz'
+                    time 60000
+                    check { result, exception, startTime, endTime ->
+                        if (exception != null) {
+                            throw exception
                         }
-                    })
+                        def json = parseJson(result)
+                        results.add(json)
+                    }
                 }
-                t1.join()
-        } catch (Exception e) {
-            logger.info(e.getMessage())
-            // make sure there is no exception.
-            assertFalse(true)
-        } finally {
-            GetDebugPoint().disableDebugPointForAllBEs("GroupCommitBlockSink._add_blocks.return_sync_mode")
+            })
+        }
+        // wait for all stream load threads to finish
+        threads.each { it.join(60000) }
+        assertTrue(results.size() == 5, "there should be 5 stream load results")
+        results.each { json ->
+            assertEquals("Success", json.Status, "Stream load should succeed")
+            assertTrue(json.GroupCommit, "GroupCommit should be true")
         }
+        def txnIds = results.collect { it.TxnId }
+        assertTrue(txnIds.every { it == txnIds[0] }, "all txnIds should be the same")
+        logger.info("case1 verification successful: correctly downgraded to sync mode when WAL space is insufficient, stream load successful")
+    } catch (Exception e) {
+        logger.error("case1 failure: " + e.getMessage())
+        throw e
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("LoadBlockQueue.has_enough_wal_disk_space.low_space")
+    }
 
-}
\ No newline at end of file
+    // case 2: Set a minimal WAL limit to test HTTP layer admission checks
+    logger.info("=== case 2: Set a minimal WAL limit to test HTTP layer admission checks ===")
+    try {
+        GetDebugPoint().enableDebugPointForAllBEs("StreamLoad.load_size_smaller_than_wal_limit.always_false")
+        try {
+            streamLoad {
+                table "${tableName}"
+                set 'column_separator', ','
+                set 'compress_type', 'GZ'
+                set 'format', 'csv'
+                set 'group_commit', 'async_mode'
+                unset 'label'
+                file 'test_low_wal_disk_space_fault_injection.csv.gz'
+                time 60000
+            }
+            assertTrue(false, "An exception for insufficient WAL space should be thrown")
+        } catch (Exception e) {
+            logger.info("catch exception message: " + e.getMessage())
+            assertTrue(e.getMessage().contains("no space for group commit") || e.getMessage().contains("WAL"), "the exception message should mention WAL space")
+        }
+        logger.info("case2 verification successful: when the WAL limit is too small, requests are correctly rejected at the HTTP layer")
+    } catch (Exception e) {
+        logger.error("failure in case2: " + e.getMessage())
+        throw e
+    } finally {
+        GetDebugPoint().disableDebugPointForAllBEs("StreamLoad.load_size_smaller_than_wal_limit.always_false")
+    }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
