This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git

The following commit(s) were added to refs/heads/master by this push:
     new db5feaf6aa4 [chore](show partitions) show partitions print commit version (#28274)
db5feaf6aa4 is described below

commit db5feaf6aa46fe342d7b6bb9f9c160335f17a757
Author: yujun <yu.jun.re...@gmail.com>
AuthorDate: Wed Aug 7 10:04:13 2024 +0800

    [chore](show partitions) show partitions print commit version (#28274)

    ## Proposed changes

    Issue Number: close #xxx
---
 .../doris/common/proc/PartitionsProcDir.java        |  5 ++++-
 .../suites/autobucket/test_autobucket.groovy        | 12 ++++++------
 .../test_autobucket_dynamic_partition.groovy        |  8 ++++----
 .../modify_partition_add_policy.groovy              |  6 +++---
 .../test_dynamic_partition.groovy                   | 14 +++++++-------
 .../test_dynamic_partition_with_rename.groovy       | 15 ++++++++-------
 .../test_multi_column_partition.groovy              | 22 +++++++++++-----------
 .../auto_partition/sql/multi_thread_load.groovy     |  4 ++--
 8 files changed, 45 insertions(+), 41 deletions(-)
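
For illustration only (not part of this commit's diff): with the trailing CommittedVersion
column appended to the "show partitions" output, a regression test can read the value by
column name instead of by position via sql_return_maparray. A minimal sketch; the table
name demo_tbl is hypothetical:

    def parts = sql_return_maparray "show partitions from demo_tbl"
    for (def p : parts) {
        // CommittedVersion is appended after the UnsyncTables column
        logger.info("partition ${p.PartitionName} committed version = ${p.CommittedVersion}")
    }
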
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
index 48e406b66d9..3e7c6bdebc7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
@@ -75,7 +75,7 @@ public class PartitionsProcDir implements ProcDirInterface {
             .add("State").add("PartitionKey").add("Range").add("DistributionKey")
             .add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime").add("RemoteStoragePolicy")
             .add("LastConsistencyCheckTime").add("DataSize").add("IsInMemory").add("ReplicaAllocation")
-            .add("IsMutable").add("SyncWithBaseTables").add("UnsyncTables")
+            .add("IsMutable").add("SyncWithBaseTables").add("UnsyncTables").add("CommittedVersion")
             .build();
 
     private Database db;
@@ -380,6 +380,9 @@ public class PartitionsProcDir implements ProcDirInterface {
                     trow.addToColumnValue(new TCell().setStringVal(FeConstants.null_string));
                 }
 
+                partitionInfo.add(partition.getCommittedVersion());
+                trow.addToColumnValue(new TCell().setLongVal(partition.getCommittedVersion()));
+
                 partitionInfos.add(Pair.of(partitionInfo, trow));
             }
         } finally {
diff --git a/regression-test/suites/autobucket/test_autobucket.groovy b/regression-test/suites/autobucket/test_autobucket.groovy
index ff672d7288a..3d0515e59c2 100644
--- a/regression-test/suites/autobucket/test_autobucket.groovy
+++ b/regression-test/suites/autobucket/test_autobucket.groovy
@@ -33,11 +33,11 @@ suite("test_autobucket") {
     log.info("show result : ${result}")
     assertTrue(result.toString().containsIgnoreCase("BUCKETS AUTO"))
 
-    result = sql "show partitions from autobucket_test"
+    result = sql_return_maparray "show partitions from autobucket_test"
     logger.info("${result}")
     // XXX: buckets at pos(8), next maybe impl by sql meta
     // 10 is the default buckets without partition size
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+    assertEquals(10, Integer.valueOf(result.get(0).Buckets))
 
     sql "drop table if exists autobucket_test"
 
@@ -57,10 +57,10 @@ suite("test_autobucket") {
     )
     """
-    result = sql "show partitions from autobucket_test_min_buckets"
+    result = sql_return_maparray "show partitions from autobucket_test_min_buckets"
     logger.info("${result}")
     // XXX: buckets at pos(8), next maybe impl by sql meta
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 5)
+    assertEquals(5, Integer.valueOf(result.get(0).Buckets))
 
     // set back to default
     sql "ADMIN SET FRONTEND CONFIG ('autobucket_min_buckets' = '1')"
     sql "drop table if exists autobucket_test_min_buckets"
@@ -81,10 +81,10 @@ suite("test_autobucket") {
    )
    """
-    result = sql "show partitions from autobucket_test_max_buckets"
+    result = sql_return_maparray "show partitions from autobucket_test_max_buckets"
     logger.info("${result}")
     // XXX: buckets at pos(8), next maybe impl by sql meta
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 1) //equals max bucket
+    assertEquals(1, Integer.valueOf(result.get(0).Buckets)) //equals max bucket
 
     // set back to default
     sql "ADMIN SET FRONTEND CONFIG ('autobucket_max_buckets' = '128')"
     sql "drop table if exists autobucket_test_max_buckets"
diff --git a/regression-test/suites/autobucket/test_autobucket_dynamic_partition.groovy b/regression-test/suites/autobucket/test_autobucket_dynamic_partition.groovy
index c2906aa6d06..909dfa9cf92 100644
--- a/regression-test/suites/autobucket/test_autobucket_dynamic_partition.groovy
+++ b/regression-test/suites/autobucket/test_autobucket_dynamic_partition.groovy
@@ -36,14 +36,14 @@ suite("test_autobucket_dynamic_partition") {
     log.info("show result : ${result}")
     assertTrue(result.toString().containsIgnoreCase("BUCKETS AUTO"))
 
-    result = sql "show partitions from test_autobucket_dynamic_partition"
+    result = sql_return_maparray "show partitions from test_autobucket_dynamic_partition"
     logger.info("${result}")
     // XXX: buckets at pos(8), next maybe impl by sql meta
     // 10 is the default buckets without partition size
     assertEquals(result.size(), 3)
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
-    assertEquals(Integer.valueOf(result.get(1).get(8)), 10)
-    assertEquals(Integer.valueOf(result.get(2).get(8)), 10)
+    for (def partition : result) {
+        assertEquals(Integer.valueOf(partition.Buckets), 10)
+    }
 
     sql "drop table if exists test_autobucket_dynamic_partition"
 }
diff --git a/regression-test/suites/cold_heat_separation/empty_table_use_policy/modify_partition_add_policy.groovy b/regression-test/suites/cold_heat_separation/empty_table_use_policy/modify_partition_add_policy.groovy
index 467830b7e3d..9608c296836 100644
--- a/regression-test/suites/cold_heat_separation/empty_table_use_policy/modify_partition_add_policy.groovy
+++ b/regression-test/suites/cold_heat_separation/empty_table_use_policy/modify_partition_add_policy.groovy
@@ -130,12 +130,12 @@ suite("add_table_policy_by_modify_partition") {
     """
 
     // Test that the partition's specified policy would be covered by the table's policy
-    def partitions = sql """
+    def partitions = sql_return_maparray """
     show partitions from create_table_partion_use_created_policy_test
     """
-    for (par in partitions) {
-        assertTrue(par[12] == "created_create_table_partition_alter_policy")
+    for (def par in partitions) {
+        assertTrue(par.RemoteStoragePolicy == "created_create_table_partition_alter_policy")
     }
 
     sql """
diff --git a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy
index b90b06f1df6..ec626eaa691 100644
--- a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy
+++ b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition.groovy
@@ -32,12 +32,12 @@ suite("test_dynamic_partition") {
         "dynamic_partition.create_history_partition"="true",
         "dynamic_partition.replication_allocation" = "tag.location.default: 1")
         """
-    List<List<Object>> result = sql "show tables like 'dy_par'"
+    def result = sql "show tables like 'dy_par'"
     logger.info("${result}")
     assertEquals(result.size(), 1)
-    result = sql "show partitions from dy_par"
+    result = sql_return_maparray "show partitions from dy_par"
     // XXX: buckets at pos(8), next maybe impl by sql meta
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+    assertEquals(result.get(0).Buckets.toInteger(), 10)
     sql "drop table dy_par"
 
     sql "drop table if exists dy_par"
@@ -59,9 +59,9 @@ suite("test_dynamic_partition") {
     result = sql "show tables like 'dy_par'"
     logger.info("${result}")
     assertEquals(result.size(), 1)
-    result = sql "show partitions from dy_par"
+    result = sql_return_maparray "show partitions from dy_par"
     // XXX: buckets at pos(8), next maybe impl by sql meta
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+    assertEquals(result.get(0).Buckets.toInteger(), 10)
     sql "drop table dy_par"
 
     sql "drop table if exists dy_par_bucket_set_by_distribution"
@@ -83,9 +83,9 @@ suite("test_dynamic_partition") {
     result = sql "show tables like 'dy_par_bucket_set_by_distribution'"
     logger.info("${result}")
     assertEquals(result.size(), 1)
-    result = sql "show partitions from dy_par_bucket_set_by_distribution"
+    result = sql_return_maparray "show partitions from dy_par_bucket_set_by_distribution"
     // XXX: buckets at pos(8), next maybe impl by sql meta
-    assertEquals(Integer.valueOf(result.get(0).get(8)), 3)
+    assertEquals(result.get(0).Buckets.toInteger(), 3)
     sql "drop table dy_par_bucket_set_by_distribution"
     sql "drop table if exists dy_par_bad"
     def isCloudMode = {
diff --git a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_rename.groovy b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_rename.groovy
index 5fd190effc5..facb3790e2a 100644
--- a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_rename.groovy
+++ b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_rename.groovy
@@ -16,9 +16,10 @@
 // under the License.
 
 suite("test_dynamic_partition_with_rename") {
-    sql "drop table if exists test_dynamic_partition_with_rename"
+    def tbl = 'test_dynamic_partition_with_rename'
+    sql "drop table if exists ${tbl}"
     sql """
-        CREATE TABLE IF NOT EXISTS test_dynamic_partition_with_rename
+        CREATE TABLE IF NOT EXISTS ${tbl}
         ( k1 date NOT NULL, k2 varchar(20) NOT NULL, k3 int sum NOT NULL )
         AGGREGATE KEY(k1,k2)
         PARTITION BY RANGE(k1) ( )
@@ -33,26 +34,26 @@ suite("test_dynamic_partition_with_rename") {
         "dynamic_partition.create_history_partition"="true",
        "dynamic_partition.replication_allocation" = "tag.location.default: 1")
        """
-    def result = sql "show partitions from test_dynamic_partition_with_rename"
+    def result = sql_return_maparray "show partitions from ${tbl}"
     assertEquals(7, result.size())
 
     // rename distributed column, then try to add too more dynamic partition
     sql "alter table test_dynamic_partition_with_rename rename column k1 renamed_k1"
     sql """ ADMIN SET FRONTEND CONFIG ('dynamic_partition_check_interval_seconds' = '1') """
-    sql """ alter table test_dynamic_partition_with_rename set('dynamic_partition.end'='5') """
-    result = sql "show partitions from test_dynamic_partition_with_rename"
+    sql """ alter table ${tbl} set('dynamic_partition.end'='5') """
+    result = sql_return_maparray "show partitions from ${tbl}"
     for (def retry = 0; retry < 120; retry++) { // at most wait 120s
         if (result.size() == 9) {
             break;
         }
         logger.info("wait dynamic partition scheduler, sleep 1s")
         sleep(1000); // sleep 1s
-        result = sql "show partitions from test_dynamic_partition_with_rename"
+        result = sql_return_maparray "show partitions from ${tbl}"
     }
     assertEquals(9, result.size())
     for (def line = 0; line < result.size(); line++) {
         // XXX: DistributionKey at pos(7), next maybe impl by sql meta
-        assertEquals("renamed_k1", result.get(line).get(7))
+        assertEquals("renamed_k1", result.get(line).DistributionKey)
     }
 
     sql "drop table test_dynamic_partition_with_rename"
diff --git a/regression-test/suites/partition_p0/multi_partition/test_multi_column_partition.groovy b/regression-test/suites/partition_p0/multi_partition/test_multi_column_partition.groovy
index e6335059161..1afd51bfab6 100644
--- a/regression-test/suites/partition_p0/multi_partition/test_multi_column_partition.groovy
+++ b/regression-test/suites/partition_p0/multi_partition/test_multi_column_partition.groovy
@@ -51,7 +51,7 @@ suite("test_multi_partition_key", "p0") {
             sql "select * from ${tableName} order by k1, k2"
             resultFile "partition_table.out"
         }
-        def result = sql "SHOW PARTITIONS FROM ${tableName}"
+        def result = sql_return_maparray "SHOW PARTITIONS FROM ${tableName}"
         assertTrue(result.size() > 1)
         if (ifDropTbl) {
             try_sql """DROP TABLE ${tableName}"""
@@ -139,8 +139,8 @@ suite("test_multi_partition_key", "p0") {
             false
     )
     // expect partition_f range: [ [126, 126] ~ [500, -128] )
-    def ret = sql "SHOW PARTITIONS FROM test_default_minvalue WHERE PartitionName='partition_f'"
-    assertTrue(ret[0][6].contains("[500, -128]"))
+    def ret = sql_return_maparray "SHOW PARTITIONS FROM test_default_minvalue WHERE PartitionName='partition_f'"
+    assertTrue(ret[0].Range.contains("[500, -128]"))
 
     // partition columns error
     test {
@@ -221,8 +221,8 @@ suite("test_multi_partition_key", "p0") {
     }
     sql "ALTER TABLE test_multi_col_test_partition_add ADD PARTITION partition_add VALUES LESS THAN ('30', '1000') "
-    def ret_add_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_add WHERE PartitionName='partition_add'"
-    assertTrue(ret[0][6].contains("[500, -128]"))
+    def ret_add_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_add WHERE PartitionName='partition_add'"
+    assertTrue(ret[0].Range.contains("[500, -128]"))
 
     test {
         sql "ALTER TABLE test_multi_col_test_partition_add ADD PARTITION add_partition_wrong " +
                 "VALUES LESS THAN ('30', '800') DISTRIBUTED BY hash(k1) BUCKETS 5"
@@ -243,11 +243,11 @@ suite("test_multi_partition_key", "p0") {
             false
     )
     sql "ALTER TABLE test_multi_col_test_partition_drop DROP PARTITION partition_d"
-    def ret_drop_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_d'"
+    def ret_drop_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_d'"
     assertEquals(0, ret_drop_p.size())
     sql "ALTER TABLE test_multi_col_test_partition_drop ADD PARTITION partition_dd VALUES LESS THAN ('0','0') "
-    ret_drop_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_dd'"
-    assertTrue(ret_drop_p[0][6].contains("[0, 0]"))
+    ret_drop_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_dd'"
+    assertTrue(ret_drop_p[0].Range.contains("[0, 0]"))
     // null value in the lowest partition, if drop the partition null is deleted.
     sql """drop table if exists test_multi_col_test_partition_null_value"""
     sql """
@@ -366,8 +366,8 @@ suite("test_multi_partition_key", "p0") {
     if (!isCloudMode()) {
         sql "ALTER TABLE test_multi_col_test_rollup MODIFY PARTITION partition_a SET( 'replication_num' = '1')"
     }
-    ret = sql "SHOW PARTITIONS FROM test_multi_col_test_rollup WHERE PartitionName='partition_a'"
-    assertEquals('1', ret[0][9])
+    ret = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_rollup WHERE PartitionName='partition_a'"
+    assertEquals(1, ret[0].ReplicationNum as int)
     // create table with range partition
     testPartitionTbl(
             "test_multi_column_fixed_range_1",
@@ -393,7 +393,7 @@ suite("test_multi_partition_key", "p0") {
     )
     // add partition with range
     sql "ALTER TABLE test_multi_column_fixed_range_1 ADD PARTITION partition_add VALUES LESS THAN ('50','1000') "
-    ret = sql "SHOW PARTITIONS FROM test_multi_column_fixed_range_1 WHERE PartitionName='partition_add'"
+    ret = sql_return_maparray "SHOW PARTITIONS FROM test_multi_column_fixed_range_1 WHERE PartitionName='partition_add'"
     assertEquals(1, ret.size(), )
     test {
         sql "ALTER TABLE test_multi_column_fixed_range_1 ADD PARTITION add_partition_wrong VALUES LESS THAN ('50','800')"
diff --git a/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy b/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy
index 8d43d90ff15..0c08d5972f5 100644
--- a/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy
+++ b/regression-test/suites/partition_p1/auto_partition/sql/multi_thread_load.groovy
@@ -211,10 +211,10 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou
     def row_count_range = sql """select count() from ${table_name};"""
     assertTrue(data_count*rows == row_count_range[0][0], "${data_count*rows}, ${row_count_range[0][0]}")
     // check there's no intersect in partitions
-    def partition_res_range = sql """show partitions from ${table_name} order by PartitionName;"""
+    def partition_res_range = sql_return_maparray """show partitions from ${table_name} order by PartitionName;"""
     for (int i = 0; i < partition_res_range.size(); i++) {
         for (int j = i+1; j < partition_res_range.size(); j++) {
-            if (partition_res_range[i][6] == partition_res_range[j][6]) {
+            if (partition_res_range[i].Range == partition_res_range[j].Range) {
                 assertTrue(false, "$i, $j")
             }
         }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org