This is an automated email from the ASF dual-hosted git repository.
hellostephen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 4624f23cb0b [fix](case) Fix some docker case run in ci (#60087)
4624f23cb0b is described below
commit 4624f23cb0b468cca0b057b7d624ff5372ebbb54
Author: deardeng <[email protected]>
AuthorDate: Fri Feb 6 17:38:45 2026 +0800
[fix](case) Fix some docker case run in ci (#60087)
---
.../cloud_p0/auth/test_set_default_cluster.groovy | 2 +-
...est_balance_use_compute_group_properties.groovy | 9 +++--
.../test_balance_warm_up_task_abnormal.groovy | 13 ++++---
.../suites/cloud_p0/multi_cluster/test_tvf.groovy | 2 +-
.../test_clean_tablet_when_drop_force_table.groovy | 9 +++--
.../suites/query_profile/profile_size_limit.groovy | 45 +++++++++++-----------
6 files changed, 42 insertions(+), 38 deletions(-)
diff --git a/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy b/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy
index ddcf6cd28f4..abfd0ae91c1 100644
--- a/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy
+++ b/regression-test/suites/cloud_p0/auth/test_set_default_cluster.groovy
@@ -43,7 +43,7 @@ suite("test_default_cluster", "docker") {
// admin role
def user2 = "default_user2"
// domain user
- def user3 = "default_user3@'175.%'"
+ def user3 = "default_user3@'%.%'"
sql """CREATE USER $user1 IDENTIFIED BY 'Cloud123456' DEFAULT ROLE
'admin'"""
sql """CREATE USER $user2 IDENTIFIED BY 'Cloud123456'"""
diff --git a/regression-test/suites/cloud_p0/balance/test_balance_use_compute_group_properties.groovy b/regression-test/suites/cloud_p0/balance/test_balance_use_compute_group_properties.groovy
index ef3cc7d81d5..5a229088d28 100644
--- a/regression-test/suites/cloud_p0/balance/test_balance_use_compute_group_properties.groovy
+++ b/regression-test/suites/cloud_p0/balance/test_balance_use_compute_group_properties.groovy
@@ -140,9 +140,9 @@ suite('test_balance_use_compute_group_properties', 'docker') {
clusterNameToBeIdx[async_warmup_cluster] = [3, 7]
clusterNameToBeIdx[sync_warmup_cluster] = [4, 8]
- // sleep 11s, wait balance
+ // sleep 15s, wait balance
// and sync_warmup cluster task 10s timeout
- sleep(11 * 1000)
+ sleep(15 * 1000)
def afterBalanceEveryClusterCache = [:]
@@ -177,10 +177,11 @@ suite('test_balance_use_compute_group_properties', 'docker') {
def assertFirstMapKeys = { clusterRet, expectedEqual ->
def firstMap = clusterRet[0]
def keys = firstMap.keySet().toList()
+ logger.info("debug: clusterName {} keys {}", clusterName, keys)
if (expectedEqual) {
- assert firstMap[keys[0]] == firstMap[keys[1]]
+ assert firstMap[keys[0]] == firstMap[keys[1]], "firstMap[keys[0]] == firstMap[keys[1]]"
} else {
- assert firstMap[keys[0]] != firstMap[keys[1]]
+ assert firstMap[keys[0]] != firstMap[keys[1]], "firstMap[keys[0]] != firstMap[keys[1]] expected not equal, but equal"
}
}
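For reference, the two assertions above rely on Groovy's optional assert message (assert <condition>, <message>), so a CI failure reports the intent instead of only the failed expression. A minimal standalone sketch of the same pattern, using made-up map contents rather than the suite's real data:

    // Groovy assert with a descriptive failure message (sample data only)
    def firstMap = [cacheBytesBe1: 100L, cacheBytesBe2: 100L]
    def keys = firstMap.keySet().toList()
    assert firstMap[keys[0]] == firstMap[keys[1]], "expected both BEs to report the same cached size"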
diff --git a/regression-test/suites/cloud_p0/balance/test_balance_warm_up_task_abnormal.groovy b/regression-test/suites/cloud_p0/balance/test_balance_warm_up_task_abnormal.groovy
index 5cb20027887..a33b0878ada 100644
--- a/regression-test/suites/cloud_p0/balance/test_balance_warm_up_task_abnormal.groovy
+++ b/regression-test/suites/cloud_p0/balance/test_balance_warm_up_task_abnormal.groovy
@@ -111,13 +111,14 @@ suite('test_balance_warm_up_task_abnormal', 'docker') {
// test recover from abnormal
sql """ALTER COMPUTE GROUP compute_cluster PROPERTIES
('balance_type'='without_warmup')"""
- sleep(5 * 1000)
- def afterAlterResult = sql_return_maparray """ADMIN SHOW REPLICA DISTRIBUTION FROM $table"""
- logger.info("after alter balance policy result {}", afterAlterResult)
- // now mapping is changed to 1 replica in each be
- assert afterAlterResult.any { row ->
- Integer.valueOf((String) row.ReplicaNum) == 1
+ awaitUntil(60) {
+ def afterAlterResult = sql_return_maparray """ADMIN SHOW REPLICA DISTRIBUTION FROM $table"""
+ logger.info("after alter balance policy result {}", afterAlterResult)
+ // now mapping is changed to 1 replica in each be
+ afterAlterResult.any { row ->
+ Integer.valueOf((String) row.ReplicaNum) == 1
+ }
}
}
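The fixed sleep(5 * 1000) above is replaced with the suite framework's awaitUntil helper, which re-evaluates the closure until it returns true or the timeout (in seconds) elapses. A rough sketch of that polling pattern in plain Groovy follows; awaitUntilDemo and its 1-second poll interval are illustrative stand-ins, not the framework's actual implementation:

    // Poll a condition once per second until it holds or the timeout expires
    def awaitUntilDemo = { int timeoutSec, Closure<Boolean> condition ->
        long deadline = System.currentTimeMillis() + timeoutSec * 1000L
        while (System.currentTimeMillis() < deadline) {
            if (condition()) {
                return
            }
            sleep(1000)
        }
        throw new IllegalStateException("condition not met within ${timeoutSec}s")
    }

    // Usage: re-check a result each round instead of sleeping a fixed time
    int attempts = 0
    awaitUntilDemo(60) {
        attempts++
        attempts >= 3   // stand-in for re-querying replica distribution
    }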
diff --git a/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy b/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy
index f43c3406c81..201e0c8795b 100644
--- a/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy
+++ b/regression-test/suites/cloud_p0/multi_cluster/test_tvf.groovy
@@ -71,7 +71,7 @@ suite('test_tvf', 'multi_cluster,docker') {
// use old clusterName, has been droped
test {
sql """select * from numbers("number" = "100")"""
- exception "Can not find compute group"
+ exception "Unable to find the compute group"
}
// switch to old cluster
sql """use @${currentCluster.cluster}"""
diff --git a/regression-test/suites/cloud_p0/tablets/test_clean_tablet_when_drop_force_table.groovy b/regression-test/suites/cloud_p0/tablets/test_clean_tablet_when_drop_force_table.groovy
index 6dafb8c2989..5ee69f9ee47 100644
--- a/regression-test/suites/cloud_p0/tablets/test_clean_tablet_when_drop_force_table.groovy
+++ b/regression-test/suites/cloud_p0/tablets/test_clean_tablet_when_drop_force_table.groovy
@@ -41,6 +41,8 @@ suite('test_clean_tablet_when_drop_force_table', 'docker') {
'write_buffer_size_for_agg=10240',
'sys_log_verbose_modules=task_worker_pool',
"enable_packed_file=${enablePackedFile}",
+ 'enable_packed_file=false',
+ 'disable_auto_compaction=true',
]
options.setFeNum(3)
options.setBeNum(3)
@@ -59,10 +61,10 @@ suite('test_clean_tablet_when_drop_force_table', 'docker') {
log.info("found queue_size=0 log line: {}", queueZeroLine)
}
- def waitForTabletCacheState = { Collection<Long> tabletIds, boolean expectPresent, long timeoutMs = 60000L, long intervalMs = 2000L ->
+ def waitForTabletCacheState = { Collection tabletIds, boolean expectPresent, long timeoutMs = 60000L, long intervalMs = 2000L ->
long start = System.currentTimeMillis()
while (System.currentTimeMillis() - start < timeoutMs) {
- boolean conditionMet = tabletIds.every { Long tabletId ->
+ boolean conditionMet = tabletIds.every { def tabletId ->
def rows = sql "select tablet_id from
information_schema.file_cache_info where tablet_id = ${tabletId}"
expectPresent ? !rows.isEmpty() : rows.isEmpty()
}
@@ -71,7 +73,7 @@ suite('test_clean_tablet_when_drop_force_table', 'docker') {
}
sleep(intervalMs)
}
- def stillPresent = tabletIds.findAll { Long tabletId -> !(sql "select tablet_id from information_schema.file_cache_info where tablet_id = ${tabletId}").isEmpty() }
+ def stillPresent = tabletIds.findAll { def tabletId -> !(sql "select tablet_id from information_schema.file_cache_info where tablet_id = ${tabletId}").isEmpty() }
if (expectPresent) {
assertTrue(false, "Tablet cache info never appeared for tablet ids
${stillPresent}")
} else {
@@ -175,6 +177,7 @@ suite('test_clean_tablet_when_drop_force_table', 'docker') {
GetDebugPoint().enableDebugPointForAllBEs("WorkPoolCloudDropTablet.drop_tablet_callback.failed")
}
// after drop table force
+ sql """select * from $tableName limit 10"""
sql """
DROP TABLE $tableName FORCE
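The select added just before DROP TABLE ... FORCE presumably pulls the table's data into the file cache so the drop path has cache entries to clean. A plausible reason for loosening Collection<Long>/Long to untyped closure parameters earlier in this file: ids pulled out of SQL results often come back as String or BigInteger rather than Long, and a strictly typed closure parameter can make Groovy throw a GroovyCastException before the check even runs. A small illustration with made-up ids:

    // Query results may carry ids as String, Long, or BigInteger (made-up values)
    def ids = ["10001", 10002L, 10003G]

    // With a strictly typed parameter, the String element can trigger a GroovyCastException:
    // ids.every { Long id -> id > 0 }

    // An untyped parameter plus an explicit conversion tolerates either representation
    assert ids.every { def id -> Long.valueOf(id.toString()) > 0 }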
diff --git a/regression-test/suites/query_profile/profile_size_limit.groovy b/regression-test/suites/query_profile/profile_size_limit.groovy
index f743c278616..4ccf71888dc 100644
--- a/regression-test/suites/query_profile/profile_size_limit.groovy
+++ b/regression-test/suites/query_profile/profile_size_limit.groovy
@@ -27,30 +27,28 @@ import groovy.json.StringEscapeUtils
final String PROFILE_SIZE_NOT_GREATER_THAN_ZERO_MSG = "Profile size is not greater than 0"
final String PROFILE_SIZE_GREATER_THAN_LIMIT_MSG = "Profile size is greater than limit"
-def getProfileList = {
- def dst = 'http://' + context.config.feHttpAddress
- def conn = new URL(dst + "/rest/v1/query_profile").openConnection()
+def getProfileList = { String feBaseUrl ->
+ def conn = new URL(feBaseUrl + "/rest/v1/query_profile").openConnection()
conn.setRequestMethod("GET")
- def encoding = Base64.getEncoder().encodeToString((context.config.feHttpUser + ":" +
- (context.config.feHttpPassword == null ? "" : context.config.feHttpPassword)).getBytes("UTF-8"))
+ def encoding = Base64.getEncoder().encodeToString(("root" + ":" +
+ "").getBytes("UTF-8"))
conn.setRequestProperty("Authorization", "Basic ${encoding}")
return conn.getInputStream().getText()
}
-def getProfile = { id ->
- def dst = 'http://' + context.config.feHttpAddress
- def conn = new URL(dst + "/api/profile/text/?query_id=$id").openConnection()
- conn.setRequestMethod("GET")
- def encoding = Base64.getEncoder().encodeToString((context.config.feHttpUser + ":" +
- (context.config.feHttpPassword == null ? "" : context.config.feHttpPassword)).getBytes("UTF-8"))
- conn.setRequestProperty("Authorization", "Basic ${encoding}")
- return conn.getInputStream().getText()
+def getProfile = { String feBaseUrl, String id ->
+ def conn = new URL(feBaseUrl + "/api/profile/text/?query_id=$id").openConnection()
+ conn.setRequestMethod("GET")
+ def encoding = Base64.getEncoder().encodeToString(("root" + ":" +
+ "").getBytes("UTF-8"))
+ conn.setRequestProperty("Authorization", "Basic ${encoding}")
+ return conn.getInputStream().getText()
}
-def getProfileWithToken = { token ->
- def wholeString = getProfileList()
+def getProfileWithToken = { String feBaseUrl, String token ->
+ def wholeString = getProfileList(feBaseUrl)
List profileData = new JsonSlurper().parseText(wholeString).data.rows
- String profileId = "";
+ String profileId = "";
logger.info("{}", token)
for (def profileItem in profileData) {
@@ -64,11 +62,11 @@ def getProfileWithToken = { token ->
// Sleep 2 seconds to make sure profile collection is done
Thread.sleep(2000)
- def String profile = getProfile(profileId).toString()
+ def String profile = getProfile(feBaseUrl, profileId).toString()
return profile;
}
-suite('profile_size_limit', 'docker, nonConcurrent') {
+suite('profile_size_limit', 'docker') {
def options = new ClusterOptions()
options.beNum = 1
options.enableDebugPoints()
@@ -132,14 +130,15 @@ suite('profile_size_limit', 'docker, nonConcurrent') {
(9010, "CTO_Technical_Strategy_Lead", 42);
"""
- def feHttpAddress = context.config.feHttpAddress.split(":")
- def feHost = feHttpAddress[0]
- def fePort = feHttpAddress[1] as int
+ def fe = cluster.getMasterFe()
+ def feHost = fe.host
+ def fePort = fe.httpPort
+ def feBaseUrl = 'http://' + feHost + ':' + fePort
sql """
select "${token}", * from profile_size_limit;
"""
- def String profile = getProfileWithToken(token)
+ def String profile = getProfileWithToken(feBaseUrl, token)
logger.info("Profile of ${token} size: ${profile.size()}")
assertTrue(profile.size() > 0, PROFILE_SIZE_NOT_GREATER_THAN_ZERO_MSG)
@@ -154,7 +153,7 @@ suite('profile_size_limit', 'docker, nonConcurrent') {
sql """
select "${token}", * from profile_size_limit;
"""
- profile = getProfileWithToken(token)
+ profile = getProfileWithToken(feBaseUrl, token)
logger.info("Profile of ${token} size: ${profile.size()}, limit:
${maxProfileSize}")
assertTrue(profile.size() <= maxProfileSize, PROFILE_SIZE_GREATER_THAN_LIMIT_MSG)
}
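The profile helpers now take an explicit feBaseUrl (built from cluster.getMasterFe()) instead of reading context.config.feHttpAddress, so the HTTP calls target the docker cluster's master FE rather than the external FE from the test config. The underlying pattern is a plain HttpURLConnection GET with HTTP Basic auth; a self-contained sketch follows, where the helper name, the 127.0.0.1:8030 address, and the root/empty-password credentials are assumptions mirroring the diff, not fixed project APIs:

    // GET a FE REST endpoint with HTTP Basic auth and return the response body as text
    def httpGetAsText = { String feBaseUrl, String path, String user, String password ->
        def conn = new URL(feBaseUrl + path).openConnection()
        conn.setRequestMethod("GET")
        def encoding = Base64.getEncoder()
                .encodeToString((user + ":" + (password ?: "")).getBytes("UTF-8"))
        conn.setRequestProperty("Authorization", "Basic ${encoding}")
        return conn.getInputStream().getText()
    }

    // Example (not executed here): list query profiles on an assumed master FE
    // def body = httpGetAsText('http://127.0.0.1:8030', '/rest/v1/query_profile', 'root', '')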
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]