This is an automated email from the ASF dual-hosted git repository.
alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/master by this push:
new 75045fa10 KUDU-2671 fix handling hash dimensions in ADD_RANGE_PARTITION
75045fa10 is described below
commit 75045fa10619c5674ca9b6405332be97e43bfeba
Author: Alexey Serbin
AuthorDate: Sat Jul 2 13:11:58 2022 -0700
KUDU-2671 fix handling hash dimensions in ADD_RANGE_PARTITION
This patch fixes a bug in handling hash dimensions when storing the
information on HashSchema for a newly added range in the system catalog.
I modified one already existing test scenario in MasterTest to spot
regressions, verifying that before the fix the updated scenario
would fail as expected.
This is a follow-up to 6909ee4f800da192b72e59680916e5004527b6db.
Change-Id: If1fed52f9abd02a8aa2bc85f2692252d16965621
Reviewed-on: http://gerrit.cloudera.org:8080/18695
Reviewed-by: Attila Bukor
Tested-by: Alexey Serbin
---
src/kudu/master/catalog_manager.cc | 3 +--
src/kudu/master/master-test.cc | 42 ++
2 files changed, 39 insertions(+), 6 deletions(-)
diff --git a/src/kudu/master/catalog_manager.cc
b/src/kudu/master/catalog_manager.cc
index da017d828..78fc4ec75 100644
--- a/src/kudu/master/catalog_manager.cc
+++ b/src/kudu/master/catalog_manager.cc
@@ -2705,9 +2705,8 @@ Status CatalogManager::ApplyAlterPartitioningSteps(
auto* hash_dimension_pb = range->add_hash_schema();
hash_dimension_pb->set_num_buckets(hash_dimension.num_buckets);
hash_dimension_pb->set_seed(hash_dimension.seed);
- auto* columns = hash_dimension_pb->add_columns();
for (const auto& column_id : hash_dimension.column_ids) {
-columns->set_id(column_id);
+hash_dimension_pb->add_columns()->set_id(column_id);
}
}
++partition_schema_updates;
diff --git a/src/kudu/master/master-test.cc b/src/kudu/master/master-test.cc
index ff91b65db..03a9f7085 100644
--- a/src/kudu/master/master-test.cc
+++ b/src/kudu/master/master-test.cc
@@ -1006,7 +1006,7 @@ TEST_F(MasterTest,
AlterTableAddAndDropRangeWithSpecificHashSchema) {
constexpr const char* const kCol0 = "c_int32";
constexpr const char* const kCol1 = "c_int64";
const Schema kTableSchema({ColumnSchema(kCol0, INT32),
- ColumnSchema(kCol1, INT64)}, 1);
+ ColumnSchema(kCol1, INT64)}, 2);
FLAGS_enable_per_range_hash_schemas = true;
FLAGS_default_num_replicas = 1;
@@ -1040,7 +1040,7 @@ TEST_F(MasterTest,
AlterTableAddAndDropRangeWithSpecificHashSchema) {
}
const auto& table_id = create_table_resp.table_id();
- const HashSchema custom_hash_schema{{{kCol0}, 5, 1}};
+ const HashSchema custom_hash_schema{{{kCol0,kCol1}, 5, 1}};
// Alter the table, adding a new range with custom hash schema.
{
@@ -1060,6 +1060,7 @@ TEST_F(MasterTest,
AlterTableAddAndDropRangeWithSpecificHashSchema) {
ColumnSchemaPB* col1 = req.mutable_schema()->add_columns();
col1->set_name(kCol1);
col1->set_type(INT64);
+ col0->set_is_key(true);
}
AlterTableRequestPB::Step* step = req.add_alter_schema_steps();
@@ -1115,6 +1116,28 @@ TEST_F(MasterTest,
AlterTableAddAndDropRangeWithSpecificHashSchema) {
PartitionSchemaPB ps_pb;
ASSERT_OK(partition_schema_retriever(&ps_pb));
ASSERT_EQ(1, ps_pb.custom_hash_schema_ranges_size());
+
+ // Check the hash schema parameters (i.e. columns and number of hash
+ // buckets) are stored and read back by the client as expected.
+ const auto& range = ps_pb.custom_hash_schema_ranges(0);
+ ASSERT_EQ(1, range.hash_schema_size());
+ const auto& hash_schema = range.hash_schema(0);
+
+ ASSERT_EQ(5, hash_schema.num_buckets());
+ ASSERT_EQ(1, hash_schema.seed());
+
+ ASSERT_EQ(2, hash_schema.columns_size());
+ const auto schema = kTableSchema.CopyWithColumnIds();
+
+ const auto ref_col_0_id = int32_t(schema.column_id(0));
+ const auto& col_0 = hash_schema.columns(0);
+ ASSERT_TRUE(col_0.has_id());
+ ASSERT_EQ(ref_col_0_id, col_0.id());
+
+ const auto ref_col_1_id = int32_t(schema.column_id(1));
+ const auto& col_1 = hash_schema.columns(1);
+ ASSERT_TRUE(col_1.has_id());
+ ASSERT_EQ(ref_col_1_id, col_1.id());
}
}
@@ -1148,6 +1171,8 @@ TEST_F(MasterTest,
AlterTableAddAndDropRangeWithSpecificHashSchema) {
resp.partition_schema(), received_schema, &ps));
ASSERT_TRUE(ps.HasCustomHashSchemas());
+// Verify that PartitionSchema::FromPB() translated/retrieved the data
+// from PartitionSchemaPB as expected.
const auto& table_wide_hash_schema = ps.hash_schema();
ASSERT_EQ(1, table_wide_hash_schema.size());
ASSERT_EQ(2, table_