This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 3083f0c00c3 branch-3.0: [Fix](test) Fix Show Data Case #47224 (#47293)
3083f0c00c3 is described below

commit 3083f0c00c3e219882151fcfd7a87a71b4e17871
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Wed Jan 22 15:15:15 2025 +0800

    branch-3.0: [Fix](test) Fix Show Data Case #47224 (#47293)
    
    Cherry-picked from #47224
    
    Co-authored-by: abmdocrt <lianyuk...@selectdb.com>
---
 cloud/src/common/config.h                          |  6 +-
 .../main/java/org/apache/doris/common/Config.java  |  3 +-
 regression-test/plugins/aliyun_oss_sdk.groovy      |  7 ++
 .../plugins/cloud_show_data_plugin.groovy          | 94 +++++++++++++++++++---
 .../test_cloud_follower_show_data.groovy           | 11 ++-
 .../test_cloud_mtmv_show_data.groovy               | 62 ++++----------
 ...ema_change_add_and_drop_column_show_data.groovy | 24 +++---
 ...hema_change_add_and_drop_index_show_data.groovy | 20 +++--
 ...d_schema_change_reorder_column_show_data.groovy | 15 ++--
 .../test_cloud_delete_table_rows_show_data.groovy  | 27 ++++---
 ...oud_drop_and_recover_partition_show_data.groovy | 40 +++++----
 .../test_cloud_drop_table_show_data.groovy         | 44 +++++-----
 ... => test_cloud_truncate_table_show_data.groovy} | 53 ++++--------
 .../test_cloud_disable_compaction_show_data.groovy | 11 ++-
 .../test_cloud_inverted_index_v1_show_data.groovy  | 11 ++-
 .../test_cloud_inverted_index_v2_show_data.groovy  | 11 ++-
 .../test_cloud_lz4_show_data.groovy                | 11 ++-
 .../test_cloud_zstd_show_data.groovy               | 11 ++-
 .../test_cloud_agg_show_data.groovy                | 11 ++-
 .../test_cloud_dup_show_data.groovy                | 16 ++--
 .../test_cloud_mor_show_data.groovy                | 11 ++-
 .../test_cloud_mow_partial_update_show_data.groovy | 13 ++-
 .../test_cloud_mow_show_data.groovy                | 11 ++-
 23 files changed, 313 insertions(+), 210 deletions(-)

diff --git a/cloud/src/common/config.h b/cloud/src/common/config.h
index ac4064c8d92..acd60da5027 100644
--- a/cloud/src/common/config.h
+++ b/cloud/src/common/config.h
@@ -62,7 +62,7 @@ CONF_String(custom_conf_path, "./conf/doris_cloud.conf");
 CONF_mInt64(recycle_interval_seconds, "3600");
 CONF_mInt64(retention_seconds, "259200"); // 72h, global retention time
 CONF_Int32(recycle_concurrency, "16");
-CONF_Int32(recycle_job_lease_expired_ms, "60000");
+CONF_mInt32(recycle_job_lease_expired_ms, "60000");
 CONF_mInt64(compacted_rowset_retention_seconds, "1800");   // 0.5h
 CONF_mInt64(dropped_index_retention_seconds, "10800");     // 3h
 CONF_mInt64(dropped_partition_retention_seconds, "10800"); // 3h
@@ -110,7 +110,7 @@ CONF_String(test_hdfs_fs_name, "");
 // CONF_Bool(b, "true");
 
 // txn config
-CONF_Int32(label_keep_max_second, "259200"); //3 * 24 * 3600 seconds
+CONF_mInt32(label_keep_max_second, "259200"); //3 * 24 * 3600 seconds
 CONF_Int32(expired_txn_scan_key_nums, "1000");
 
 // Maximum number of version of a tablet. If the version num of a tablet exceed limit,
@@ -133,7 +133,7 @@ CONF_String(specific_max_qps_limit, "get_cluster:5000000;begin_txn:5000000");
 CONF_Bool(enable_rate_limit, "true");
 CONF_Int64(bvar_qps_update_second, "5");
 
-CONF_Int32(copy_job_max_retention_second, "259200"); //3 * 24 * 3600 seconds
+CONF_mInt32(copy_job_max_retention_second, "259200"); //3 * 24 * 3600 seconds
 CONF_String(arn_id, "");
 CONF_String(arn_ak, "");
 CONF_String(arn_sk, "");
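[The CONF_Int32 -> CONF_mInt32 changes above mark these MetaService settings as mutable at runtime, which is what lets the new test plugin further down flip them through the MetaService update_config HTTP endpoint. A minimal Groovy sketch of such a call follows; the endpoint address is an assumed placeholder, and in the plugin it is instead read from the BE config "meta_service_endpoint".]

    // Hedged sketch: POST /MetaService/http/v1/update_config?<key>=<value>.
    // "127.0.0.1:5000" is an assumed address, not taken from this patch.
    def msEndpoint = "127.0.0.1:5000"
    def key = "recycle_job_lease_expired_ms"
    def value = "0"
    def conn = new URL("http://${msEndpoint}/MetaService/http/v1/update_config?${key}=${value}").openConnection()
    conn.requestMethod = "POST"
    println "update_config ${key}=${value} -> HTTP ${conn.responseCode}"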
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
index 7ddf6ed2925..c4f0a7f05dc 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
@@ -930,7 +930,8 @@ public class Config extends ConfigBase {
 
     // update interval of tablet stat
     // All frontends will get tablet stat from all backends at each interval
-    @ConfField public static int tablet_stat_update_interval_second = 60;  // 1 min
+    @ConfField(mutable = true)
+    public static int tablet_stat_update_interval_second = 60;  // 1 min
 
     /**
      * Max bytes a broker scanner can process in one broker load job.
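[Because tablet_stat_update_interval_second is now declared mutable = true, the suites below can tune it on a live FE via ADMIN SET FRONTEND CONFIG instead of restarting. A sketch of the pattern in the regression framework's Groovy dialect; `sql` is the framework helper, and the values mirror set_config_before/after_show_data_test in the plugin below.]

    // Shorten the tablet-stat report interval for the test, restore it afterwards.
    sql """admin set frontend config ("tablet_stat_update_interval_second" = "1")"""
    try {
        // ... run the show-data checks ...
    } finally {
        sql """admin set frontend config ("tablet_stat_update_interval_second" = "10")"""
    }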
diff --git a/regression-test/plugins/aliyun_oss_sdk.groovy b/regression-test/plugins/aliyun_oss_sdk.groovy
index efd6efa585b..6b0c096d7e5 100644
--- a/regression-test/plugins/aliyun_oss_sdk.groovy
+++ b/regression-test/plugins/aliyun_oss_sdk.groovy
@@ -97,6 +97,7 @@ Suite.metaClass.calculateFolderLength = { OSS client, String bucketName, String
     ObjectListing objectListing = null;
     do {
         // The default value for MaxKey is 100, and the maximum value is 1000
+        logger.info("debug:" + folder)
        ListObjectsRequest request = new ListObjectsRequest(bucketName).withPrefix(folder).withMaxKeys(1000);
         if (objectListing != null) {
             request.setMarker(objectListing.getNextMarker());
@@ -104,6 +105,12 @@ Suite.metaClass.calculateFolderLength = { OSS client, String bucketName, String
         objectListing = client.listObjects(request);
         List<OSSObjectSummary> sums = objectListing.getObjectSummaries();
         for (OSSObjectSummary s : sums) {
+            logger.info("Object Key: ${s.getKey()}")
+            logger.info("Size: ${s.getSize()} bytes")
+            logger.info("Last Modified: ${s.getLastModified()}")
+            logger.info("Storage Class: ${s.getStorageClass()}")
+            logger.info("Owner: ${s.getOwner()?.getId()}")
+            logger.info("-------------------")
             size += s.getSize();
         }
     } while (objectListing.isTruncated());
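[For reference, the loop above pages through the bucket in batches because listObjects returns at most 1000 keys per call, chaining requests with the previous listing's nextMarker. A self-contained Groovy sketch of the same pattern, assuming the Aliyun OSS Java SDK is on the classpath:]

    import com.aliyun.oss.OSS
    import com.aliyun.oss.model.ListObjectsRequest
    import com.aliyun.oss.model.ObjectListing
    import com.aliyun.oss.model.OSSObjectSummary

    // Sum object sizes under a prefix, following nextMarker until the
    // listing is no longer truncated (mirrors calculateFolderLength above).
    long folderSize(OSS client, String bucketName, String prefix) {
        long size = 0
        ObjectListing listing = null
        while (true) {
            def request = new ListObjectsRequest(bucketName).withPrefix(prefix).withMaxKeys(1000)
            if (listing != null) {
                request.setMarker(listing.getNextMarker())
            }
            listing = client.listObjects(request)
            for (OSSObjectSummary s : listing.getObjectSummaries()) {
                size += s.getSize()
            }
            if (!listing.isTruncated()) {
                return size
            }
        }
    }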
diff --git a/regression-test/plugins/cloud_show_data_plugin.groovy b/regression-test/plugins/cloud_show_data_plugin.groovy
index 43dc6fd3834..54375c955d4 100644
--- a/regression-test/plugins/cloud_show_data_plugin.groovy
+++ b/regression-test/plugins/cloud_show_data_plugin.groovy
@@ -69,7 +69,8 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
     }
 
     Suite.metaClass.get_tablets_from_table = { String table ->
-        def res = sql_return_maparray """show tablets from  ${table}"""
+        def res = sql_return_maparray """show tablets from ${table}"""
+        logger.info("get tablets from ${table}:" + res)
         return res 
     }
 
@@ -120,10 +121,10 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
                if (tabletStatusAfterCompaction.rowsets.size() < tabletStatusBeforeCompaction.rowsets.size()){
                    compactionStatus = 'FINISHED'
                }
-                Thread.sleep(60 * 1000)
-            } while (timeoutTimestamp > System.currentTimeMillis() && (status != 'FINISHED'))
+                Thread.sleep(10 * 1000)
+            } while (timeoutTimestamp > System.currentTimeMillis() && (compactionStatus != 'FINISHED'))
 
-            if (status != "FINISHED") {
+            if (compactionStatus != "FINISHED") {
                 logger.info("compaction not Finish or failed")
                 return false
             }
@@ -132,8 +133,6 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 
     Suite.metaClass.trigger_compaction = { List<List<Object>> tablets ->
         for(def tablet: tablets) {
-            trigger_tablet_compaction(tablet, "cumulative")
-            trigger_tablet_compaction(tablet, "base")
             trigger_tablet_compaction(tablet, "full")
         }
     }
@@ -157,7 +156,7 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
 
             def client = initOssClient(ak, sk, endpoint)
             for(String tabletId: tabletIds) {
-                storageSize += calculateFolderLength(client, bucketName, storagePrefix + "/data/" + tabletId)
+                storageSize += calculateFolderLength(client, bucketName, storagePrefix + "data/" + tabletId)
             }
             shutDownOssClient(client)
         }
@@ -168,8 +167,8 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
             def fsUser = context.config.otherConfigs.get("cbsFsUser")
             def storagePrefix = context.config.otherConfigs.get("cbsFsPrefix")
         }
-
-        return storageSize
+        def round_size = new BigDecimal(storageSize/1024/1024).setScale(0, BigDecimal.ROUND_FLOOR);
+        return round_size
     }
 
    Suite.metaClass.translate_different_unit_to_MB = { String size, String unitField ->
@@ -196,7 +195,8 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
             def unitField = fields[1]
            mysqlShowDataSize = translate_different_unit_to_MB(sizeField, unitField)
         }
-        return mysqlShowDataSize
+        def round_size = new BigDecimal(mysqlShowDataSize).setScale(0, BigDecimal.ROUND_FLOOR);
+        return round_size
     }
 
    Suite.metaClass.caculate_table_data_size_through_api = { List<List<Object>> tablets ->
@@ -214,7 +214,79 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
                 }
             }
         }
+        def round_size = new BigDecimal(apiCaculateSize).setScale(0, BigDecimal.ROUND_FLOOR);
+        return round_size
+    }
+
+    Suite.metaClass.update_ms_config = { String ms_endpoint, String key, String value /*param */ ->
+        return curl("POST", String.format("http://%s/MetaService/http/v1/update_config?%s=%s", ms_endpoint, key, value))
+    }
+
+    Suite.metaClass.set_config_before_show_data_test = { ->
+
+        sql """admin set frontend config ("tablet_stat_update_interval_second" 
= "1")"""
+        sql """admin set frontend config ("catalog_trash_expire_second" = 
"1")"""
+
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        def get_be_param = { paramName ->
+            // assuming paramName has the same value on all BEs
+            def backendId = backendId_to_backendIP.keySet()[0]
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backendId), backendId_to_backendHttpPort.get(backendId))
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == paramName) {
+                    return ((List<String>) ele)[2]
+                }
+            }
+        }
+
+        def ms_endpoint = get_be_param("meta_service_endpoint");
+
+        update_ms_config.call(ms_endpoint, "recycle_interval_seconds", "5")
+        update_ms_config.call(ms_endpoint, "retention_seconds", "0")
+        update_ms_config.call(ms_endpoint, "compacted_rowset_retention_seconds", "0")
+        update_ms_config.call(ms_endpoint, "recycle_job_lease_expired_ms", "0")
+        update_ms_config.call(ms_endpoint, "dropped_partition_retention_seconds", "0")
+        update_ms_config.call(ms_endpoint, "label_keep_max_second", "0")
+        update_ms_config.call(ms_endpoint, "copy_job_max_retention_second", "0")
+    }
+
+    Suite.metaClass.set_config_after_show_data_test = { ->
+
+        sql """admin set frontend config ("tablet_stat_update_interval_second" 
= "10")"""
+        sql """admin set frontend config ("catalog_trash_expire_second" = 
"600")"""
+
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        def get_be_param = { paramName ->
+            // assuming paramName has the same value on all BEs
+            def backendId = backendId_to_backendIP.keySet()[0]
+            def (code, out, err) = show_be_config(backendId_to_backendIP.get(backendId), backendId_to_backendHttpPort.get(backendId))
+            assertEquals(code, 0)
+            def configList = parseJson(out.trim())
+            assert configList instanceof List
+            for (Object ele in (List) configList) {
+                assert ele instanceof List<String>
+                if (((List<String>) ele)[0] == paramName) {
+                    return ((List<String>) ele)[2]
+                }
+            }
+        }
+
+        def ms_endpoint = get_be_param("meta_service_endpoint");
 
-        return apiCaculateSize
+        update_ms_config.call(ms_endpoint, "recycle_interval_seconds", "600")
+        update_ms_config.call(ms_endpoint, "retention_seconds", "259200")
+        update_ms_config.call(ms_endpoint, "compacted_rowset_retention_seconds", "1800")
+        update_ms_config.call(ms_endpoint, "recycle_job_lease_expired_ms", "60000")
+        update_ms_config.call(ms_endpoint, "dropped_partition_retention_seconds", "10800")
+        update_ms_config.call(ms_endpoint, "label_keep_max_second", "300")
+        update_ms_config.call(ms_endpoint, "copy_job_max_retention_second", "259200")
     }
 
//http://qa-build.oss-cn-beijing.aliyuncs.com/regression/show_data/fullData.1.part1.gz
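[A note on the new rounding above: each of the three measurements (tablet API, object storage, `show data`) is now floored to whole MB before the assertEquals comparisons, so byte-level jitter between the sources no longer fails the checks. A tiny Groovy illustration with a made-up byte count:]

    def storageSizeBytes = 123456789   // hypothetical raw byte count
    // Groovy's `/` on integers yields BigDecimal, so floor it to whole MB:
    def roundMb = (storageSizeBytes / 1024 / 1024).setScale(0, BigDecimal.ROUND_FLOOR)
    assert roundMb == 117              // 123456789 bytes ~= 117.7 MB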
diff --git a/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
index f748cb740b4..671191a963d 100644
--- a/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_follower_show_data","p2") {
+suite("test_cloud_follower_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -72,13 +72,13 @@ suite("test_cloud_follower_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -86,6 +86,7 @@ suite("test_cloud_follower_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect load 1 times ==  load 10 times
+        logger.info("after 1 time stream load, size is 
${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is 
${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
@@ -121,5 +122,9 @@ suite("test_cloud_follower_show_data","p2") {
         check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy
index cc4fd289296..637aa463d45 100644
--- a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_mtmv_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_mtmv_show_data","p2") {
+suite("test_cloud_mtmv_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -98,13 +98,13 @@ suite("test_cloud_mtmv_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -124,75 +124,37 @@ suite("test_cloud_mtmv_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
-
-            sql "select count(*) from ${tableName}"
-
-            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-
-            // expect mysqlSize == apiSize == storageSize
-            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
-            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
-
-            // trigger the compaction mechanism
-            trigger_compaction(tablets)
-
-            // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
-
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-
-            // expect mysqlSize == apiSize == storageSize
-            assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3])
-            assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3])
+            logger.info("after create mv, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
         if (op == 2){
             create_mtmv(tableName)
-            tableName = ${tableName} + "_mtmv"
+            tableName = "${tableName}" + "_mtmv"
             tablets = get_tablets_from_table(tableName)
 
             // trigger the compaction mechanism
             trigger_compaction(tablets)

             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
-
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
+            logger.info("after create mtmv, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")

             // expect mysqlSize == apiSize == storageSize
             assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
             assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
-
-            // trigger the compaction mechanism
-            trigger_compaction(tablets)
-
-            // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
-
-            sql "select count(*) from ${tableName}"
-
-            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-
-            // expect mysqlSize == apiSize == storageSize
-            assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3])
-            assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["cbsSize"][3])
         }
     }
 
@@ -205,5 +167,9 @@ suite("test_cloud_mtmv_show_data","p2") {
         check(tableName, 2)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy
index 51d4f3936c3..a2c49850b40 100644
--- a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_column_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
+suite("test_cloud_schema_change_add_and_drop_column_show_data","p2, 
nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -126,13 +126,13 @@ suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -152,14 +152,15 @@ suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
         trigger_compaction(tablets)
 
         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)
+        tablets = get_tablets_from_table(tableName)

         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
+        logger.info("after add column, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
@@ -171,14 +172,15 @@ suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
         trigger_compaction(tablets)
 
         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)
+        tablets = get_tablets_from_table(tableName)

         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
+        logger.info("after drop column, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3])
@@ -194,5 +196,9 @@ suite("test_cloud_schema_change_add_and_drop_column_show_data","p2") {
         check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy
index 328b73cb60d..c5d94213f9e 100644
--- a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_add_and_drop_index_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
+suite("test_cloud_schema_change_add_and_drop_index_show_data","p2, 
nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -126,13 +126,13 @@ suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -152,13 +152,14 @@ suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
         trigger_compaction(tablets)
 
         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)

         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+        logger.info("after add index, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
 
         // expect mysqlSize == apiSize == storageSize
@@ -173,13 +174,14 @@ suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
         trigger_compaction(tablets)
 
         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)

         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+        logger.info("after drop index, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
 
         // expect mysqlSize == apiSize == storageSize
@@ -196,5 +198,9 @@ suite("test_cloud_schema_change_add_and_drop_index_show_data","p2") {
         check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy
index 507d578bbcb..55c87ba56c4 100644
--- a/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_modification/test_cloud_schema_change_reorder_column_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_schema_change_reorder_column_show_data","p2") {
+suite("test_cloud_schema_change_reorder_column_show_data","p2, nonConcurrent") 
{
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -115,13 +115,13 @@ suite("test_cloud_schema_change_reorder_column_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -140,13 +140,14 @@ suite("test_cloud_schema_change_reorder_column_show_data","p2") {
         trigger_compaction(tablets)
 
         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)

         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+        logger.info("after reorder column, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
 
         // expect mysqlSize == apiSize == storageSize
@@ -163,5 +164,9 @@ suite("test_cloud_schema_change_reorder_column_show_data","p2") {
         check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
index 792cc1d2b4d..9cef5a53712 100644
--- a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_delete_table_rows_show_data","p2") {
+suite("test_cloud_delete_table_rows_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -84,13 +84,11 @@ suite("test_cloud_delete_table_rows_show_data","p2") {
             PARTITION BY RANGE(L_ORDERKEY) 
             (
              PARTITION p1 VALUES LESS THAN (100000),
-              PARTITION p2 VALUES LESS THAN (200000),
               PARTITION p3 VALUES LESS THAN (300000),
-              PARTITION p4 VALUES LESS THAN (400000),
               PARTITION p5 VALUES LESS THAN (500000),
               PARTITION other VALUES LESS THAN (MAXVALUE)
             )
-            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 1
             PROPERTIES (
               "replication_num" = "1"
             )
@@ -125,7 +123,7 @@ suite("test_cloud_delete_table_rows_show_data","p2") {
             AUTO PARTITION BY RANGE (date_trunc(`L_SHIPDATE`, 'year'))
             (
             )
-            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 1
             PROPERTIES (
               "replication_num" = "1"
             )
@@ -141,17 +139,18 @@ suite("test_cloud_delete_table_rows_show_data","p2") {
             repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
             def rows = sql_return_maparray "select count(*) as count from ${tableName};"
             logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            tablets = get_tablets_from_table(tableName)
             // trigger the compaction mechanism
             trigger_compaction(tablets)

             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -164,18 +163,20 @@ suite("test_cloud_delete_table_rows_show_data","p2") {
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
 
         sql """delete from ${tableName} where L_ORDERKEY >=0;"""
-        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))

         // trigger the compaction mechanism
         trigger_compaction(tablets)

         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)

+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
         // expect mysqlSize == apiSize == storageSize
+        logger.info("after delete, mysqlSize is: ${sizeRecords["mysqlSize"][2]}, apiSize is: ${sizeRecords["apiSize"][2]}, storageSize is: ${sizeRecords["cbsSize"][2]}")
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
     }
@@ -192,5 +193,9 @@ suite("test_cloud_delete_table_rows_show_data","p2") {
         check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy
index 672c0f78e39..b067ad5d9ad 100644
--- a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_and_recover_partition_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_drop_and_recover_partition_show_data","p2") {
+suite("test_cloud_drop_and_recover_partition_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -84,13 +84,11 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
             PARTITION BY RANGE(L_ORDERKEY) 
             (
              PARTITION p1 VALUES LESS THAN (100000),
-              PARTITION p2 VALUES LESS THAN (200000),
               PARTITION p3 VALUES LESS THAN (300000),
-              PARTITION p4 VALUES LESS THAN (400000),
               PARTITION p5 VALUES LESS THAN (500000),
               PARTITION other VALUES LESS THAN (MAXVALUE)
             )
-            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 1
             PROPERTIES (
               "replication_num" = "1"
             )
@@ -125,7 +123,7 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
             AUTO PARTITION BY RANGE (date_trunc(`L_SHIPDATE`, 'year'))
             (
             )
-            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 3
+            DISTRIBUTED BY HASH(L_ORDERKEY) BUCKETS 1
             PROPERTIES (
               "replication_num" = "1"
             )
@@ -141,17 +139,18 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
             repeate_stream_load_same_data(tableName, i, "regression/tpch/sf0.1/lineitem.tbl.gz")
             def rows = sql_return_maparray "select count(*) as count from ${tableName};"
             logger.info("table ${tableName} has ${rows[0]["count"]} rows")
+            tablets = get_tablets_from_table(tableName)
             // trigger the compaction mechanism
             trigger_compaction(tablets)

             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -171,16 +170,18 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
 
         // after dropping the partition, tablets will have changed; need to get new tablets
         tablets = get_tablets_from_table(tableName)
-        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))

         // trigger the compaction mechanism
         trigger_compaction(tablets)

         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)
+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+        logger.info("after drop partition, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
@@ -189,21 +190,24 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
         if (op == 1){
           sql """recover partition p1 from ${tableName};"""
         } else if(op == 2){
-          sql """recover partition pp19920101000000 from ${tableName};"""
+          sql """recover partition p19920101000000 from ${tableName};"""
         }
 
         // after recovering the partition, tablets will have changed; need to get new tablets
         tablets = get_tablets_from_table(tableName)
-        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))

         // trigger the compaction mechanism
         trigger_compaction(tablets)

         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)
+
+        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+        logger.info("after recover partition, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][3], sizeRecords["apiSize"][3])
@@ -222,5 +226,9 @@ suite("test_cloud_drop_and_recover_partition_show_data","p2") {
         check(tableName, 2)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
index c8daaaa69be..d80295d802f 100644
--- a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_drop_and_recover_table_show_data","p2") {
+suite("test_cloud_drop_and_recover_table_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -71,13 +71,13 @@ suite("test_cloud_drop_and_recover_table_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -93,20 +93,21 @@ suite("test_cloud_drop_and_recover_table_show_data","p2") {
 
             sql """drop table ${tableName}"""
 
-            sleep(60 * 1000)
+            sleep(10 * 1000)
 
             sql """recover table ${tableName}"""
-            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-            tablets = get_tablets_from_table(tableName)
             // trigger the compaction mechanism
             trigger_compaction(tablets)

             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
+            tablets = get_tablets_from_table(tableName)
+            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
+            logger.info("after recover table, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")

             // expect mysqlSize == apiSize == storageSize
             assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
@@ -117,35 +118,30 @@ suite("test_cloud_drop_and_recover_table_show_data","p2") {
         if(op == 2){
 
             sql """drop table ${tableName} force"""
-            sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-            sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-            tablets = get_tablets_from_table(tableName)
-            // trigger the compaction mechanism
-            trigger_compaction(tablets)

             // then sleep 1 min, wait for the FE to finish reporting
             sleep(60 * 1000)
-            sql "select count(*) from ${tableName}"
+            sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
+            logger.info("after drop table force, storageSize is: ${sizeRecords["cbsSize"][-1]}")

-            // expect mysqlSize == apiSize == storageSize
-            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
-            assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
-            assertEquals(sizeRecords["mysqlSize"][2], 0)
+            assertEquals(sizeRecords["cbsSize"][2], 0.0)
 
         }
     }
 
     def main = {
         def tableName = "test_cloud_drop_and_recover_table_show_data"
-        create_normal_table(tableName) 
-        check(tableName, 1)
+        //create_normal_table(tableName) 
+        //check(tableName, 1)
 
         tableName = "test_cloud_drop_and_recover_table_force_show_data"
         create_normal_table(tableName) 
         check(tableName, 2)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_table_show_data.groovy
similarity index 77%
rename from regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy
rename to regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_table_show_data.groovy
index c06a402ce94..e435b6bb180 100644
--- a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_and_recover_table_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_truncate_table_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_truncate_and_recover_table_show_data","p2") {
+suite("test_cloud_truncate_table_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -58,7 +58,7 @@ suite("test_cloud_truncate_and_recover_table_show_data","p2") {
         """
     }
 
-    def check = {String tableName, int op -> 
+    def check = {String tableName-> 
         List<String> tablets = get_tablets_from_table(tableName)
         def loadTimes = [1, 10]
         Map<String, List> sizeRecords = ["apiSize":[], "mysqlSize":[], "cbsSize":[]]
@@ -71,13 +71,13 @@ suite("test_cloud_truncate_and_recover_table_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -89,61 +89,36 @@ suite("test_cloud_truncate_and_recover_table_show_data","p2") {
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
 
-        if(op == 1){
-
         sql """truncate table ${tableName}"""
 
-        sleep(60 * 1000)
-
-        sql """recover table ${tableName}"""
-        sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
-        sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
-        sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
         // trigger the compaction mechanism
         trigger_compaction(tablets)

         // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
+        sleep(10 * 1000)
         sql "select count(*) from ${tableName}"
+        sleep(10 * 1000)
+        tablets = get_tablets_from_table(tableName)

-        // expect mysqlSize == apiSize == storageSize
-        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
-        assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
-        assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][2])
-        }
-
-        if(op == 2){
-
-        sql """truncate table ${tableName} force"""
         sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
         sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
         sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-
-        // trigger the compaction mechanism
-        trigger_compaction(tablets)
-
-        // then sleep 1 min, wait for the FE to finish reporting
-        sleep(60 * 1000)
-        sql "select count(*) from ${tableName}"
+        logger.info("after truncate table, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["apiSize"][2])
         assertEquals(sizeRecords["mysqlSize"][2], sizeRecords["cbsSize"][2])
-        assertEquals(sizeRecords["mysqlSize"][2], 0)
-
-        }
     }
 
     def main = {
-        def tableName = "test_cloud_truncate_and_recover_table_show_data"
-        create_normal_table(tableName) 
-        check(tableName, 1)
-
-        tableName = "test_cloud_truncate_and_recover_table_force_show_data"
+        def tableName = "test_cloud_truncate_table_show_data"
         create_normal_table(tableName) 
-        check(tableName, 2)
+        check(tableName)
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
index 452d25a4739..f04c6613334 100644
--- a/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_disable_compaction_show_data","p2") {
+suite("test_cloud_disable_compaction_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -68,13 +68,13 @@ suite("test_cloud_disable_compaction_show_data","p2") {
             logger.info("table ${tableName} has ${rows[0]["count"]} rows")
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -82,10 +82,15 @@ suite("test_cloud_disable_compaction_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect load 1 times ==  load 10 times
+        logger.info("after 1 time stream load, size is 
${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is 
${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy
index 0cd12e6a9ff..1077e0436a0 100644
--- a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v1_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // loading one data 10 times, expect data size not rising
-suite("test_cloud_inverted_index_v1_show_data","p2") {
+suite("test_cloud_inverted_index_v1_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -73,13 +73,13 @@ suite("test_cloud_inverted_index_v1_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, wait for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)

             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -87,10 +87,15 @@ suite("test_cloud_inverted_index_v1_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy
index 6670e2067da..db07832bfb1 100644
--- a/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_inverted_index_v2_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_inverted_index_v2_show_data","p2") {
+suite("test_cloud_inverted_index_v2_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -73,13 +73,13 @@ suite("test_cloud_inverted_index_v2_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -87,10 +87,15 @@ suite("test_cloud_inverted_index_v2_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy
index de5464759cc..aab9604f67f 100644
--- a/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_lz4_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_lz4_show_data","p2") {
+suite("test_cloud_lz4_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -70,13 +70,13 @@ suite("test_cloud_lz4_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -84,10 +84,15 @@ suite("test_cloud_lz4_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy b/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy
index ad37f9ac95e..83e50450dca 100644
--- a/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_property/test_cloud_zstd_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_lz4_show_data","p2") {
+suite("test_cloud_zstd_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -70,13 +70,13 @@ suite("test_cloud_lz4_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -84,10 +84,15 @@ suite("test_cloud_lz4_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy
index e995845f26a..55bf038efb0 100644
--- a/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_agg_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_agg_show_data","p2") {
+suite("test_cloud_agg_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -69,13 +69,13 @@ suite("test_cloud_agg_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
 
         }
@@ -87,9 +87,14 @@ suite("test_cloud_agg_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["cbsSize"][1])
         // expect 10 * size after 1 load >= size after 10 loads >= size after 1 load on the agg table
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertTrue(10*sizeRecords["mysqlSize"][0]>=sizeRecords["mysqlSize"][1])
         assertTrue(sizeRecords["mysqlSize"][1]>=sizeRecords["mysqlSize"][0])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
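
For the aggregate-key table the expectation is a two-sided bound rather than equality: reloading identical keys gets merged by aggregation during compaction, so the 10-load size may shrink toward the 1-load size but should stay within [1x, 10x]. A small restatement of the bound the two assertTrue calls above encode (s1/s10 are just illustrative local aliases, not names from the suite):

    // Bound behind the two assertTrue calls in the agg suite.
    def s1  = sizeRecords["mysqlSize"][0]
    def s10 = sizeRecords["mysqlSize"][1]
    assertTrue(10 * s1 >= s10)  // at worst ten un-merged copies
    assertTrue(s10 >= s1)       // aggregation never drops below one copy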
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy
index ad3109dd945..f4ed8338407 100644
--- a/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_dup_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_dup_show_data","p2") {
+suite("test_cloud_dup_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -69,13 +69,13 @@ suite("test_cloud_dup_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 5 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -85,9 +85,15 @@ suite("test_cloud_dup_show_data","p2") {
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["mysqlSize"][1], sizeRecords["cbsSize"][1])
-        // expect load 10 times on dup table = 10 * load 1 times on dup table
-        assertTrue(10*sizeRecords["mysqlSize"][0]==sizeRecords["mysqlSize"][1])
+        // expect size after 10 loads on dup table < 10 * size after 1 load
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
+        assertTrue(10*sizeRecords["mysqlSize"][0] > sizeRecords["mysqlSize"][1])
+        assertTrue(sizeRecords["mysqlSize"][0] < sizeRecords["mysqlSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
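
The duplicate-key suite is the one whose expectation genuinely changes: the old exact check (size after 10 loads == 10 * size after 1 load) is replaced with strict bounds, presumably because compaction merges the ten rowsets and amortizes per-rowset overhead, leaving the observed size strictly between one and ten times the single-load size. Illustrative arithmetic only, with invented numbers:

    // Hypothetical sizes: one load reports ~100 units and ten duplicate
    // loads ~900 after compaction, so the new strict bounds hold where
    // the old equality (10 * 100 == 900) would fail.
    def one = 100, ten = 900
    assert one < ten
    assert ten < 10 * one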
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy
index e159ebcecf9..fb11d96ac5e 100644
--- a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mor_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_mor_show_data","p2") {
+suite("test_cloud_mor_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -70,13 +70,13 @@ suite("test_cloud_mor_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -84,10 +84,15 @@ suite("test_cloud_mor_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy
index e32342775fb..6521b5190f4 100644
--- a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_partial_update_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_mow_partial_update_show_data","p2") {
+suite("test_cloud_mow_partial_update_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -236,7 +236,7 @@ suite("test_cloud_mow_partial_update_show_data","p2") {
                 ) ENGINE=OLAP
                 UNIQUE KEY(`id`)
                 COMMENT "OLAP"
-                DISTRIBUTED BY HASH(`id`) BUCKETS 32
+                DISTRIBUTED BY HASH(`id`) BUCKETS 3
                 PROPERTIES (
                 "store_row_column" = "true"
                 );
@@ -258,13 +258,13 @@ suite("test_cloud_mow_partial_update_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -272,10 +272,15 @@ suite("test_cloud_mow_partial_update_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
diff --git a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy
index 29ce5af4909..26a407349ca 100644
--- a/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_table_type/test_cloud_mow_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
  // load the same data 10 times, expecting the data size not to rise
-suite("test_cloud_mow_show_data","p2") {
+suite("test_cloud_mow_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -69,13 +69,13 @@ suite("test_cloud_mow_show_data","p2") {
             trigger_compaction(tablets)
 
             // then sleep 1 min, waiting for the FE to finish reporting
-            sleep(60 * 1000)
+            sleep(10 * 1000)
             sql "select count(*) from ${tableName}"
+            sleep(10 * 1000)
 
             sizeRecords["apiSize"].add(caculate_table_data_size_through_api(tablets))
             sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
-            sleep(60 * 1000)
             logger.info("after ${i} times stream load, mysqlSize is: ${sizeRecords["mysqlSize"][-1]}, apiSize is: ${sizeRecords["apiSize"][-1]}, storageSize is: ${sizeRecords["cbsSize"][-1]}")
         }
 
@@ -83,10 +83,15 @@ suite("test_cloud_mow_show_data","p2") {
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
         // expect size after 1 load == size after 10 loads
+        logger.info("after 1 time stream load, size is ${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is ${sizeRecords["mysqlSize"][1]}")
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
     }
 
+    set_config_before_show_data_test()
+    sleep(10 * 1000)
     main()
+    set_config_after_show_data_test()
+    sleep(10 * 1000)
 }
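
Finally, note the group retag that accompanies every suite in this commit: "p2" becomes "p2, nonConcurrent", which keeps these suites out of the parallel pool while the set_config_* helpers have cluster-wide settings changed that concurrently running suites would otherwise observe. As it appears at the top of each file:

    // "nonConcurrent" asks the framework to run the suite in isolation,
    // since set_config_before_show_data_test() mutates global config.
    suite("test_cloud_mow_show_data","p2, nonConcurrent") {
        // ... suite body as in the hunks above ...
    }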


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
