This is an automated email from the ASF dual-hosted git repository.

laiyingchun pushed a commit to branch branch-1.17.x
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 6527f4a0fdccaddbee58cb66c2255f25eff29ddc
Author: Abhishek Chennaka <achenn...@cloudera.com>
AuthorDate: Tue Jul 18 13:33:26 2023 -0700

    KUDU-1945 Update default range partition key
    
    The default range partition key includes all the columns of the
    primary key. We should not include the auto-incrementing column,
    as it is not expected to be part of the partition key by design.
    
    Change-Id: I89818ceb261064369a63712f6c093f41e57ca5cc
    Reviewed-on: http://gerrit.cloudera.org:8080/20219
    Tested-by: Kudu Jenkins
    Reviewed-by: Alexey Serbin <ale...@apache.org>
    (cherry picked from commit 4d0a654024981b78e36912aaf7838705e4ca0e78)
    Reviewed-on: http://gerrit.cloudera.org:8080/20234
    Reviewed-by: Yingchun Lai <laiyingc...@apache.org>
    Tested-by: Yingchun Lai <laiyingc...@apache.org>
---
 src/kudu/common/partition-test.cc | 19 +++++++++++++++++++
 src/kudu/common/partition.cc      |  6 ++++--
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/src/kudu/common/partition-test.cc b/src/kudu/common/partition-test.cc
index 760f35b84..2de88ecea 100644
--- a/src/kudu/common/partition-test.cc
+++ b/src/kudu/common/partition-test.cc
@@ -2313,4 +2313,23 @@ TEST_F(PartitionTest, HasCustomHashSchemasWhenAddingAndDroppingRanges) {
   ASSERT_EQ(0, ps.hash_schema_idx_by_encoded_range_start_.size());
 }
 
+// A test scenario to verify that the default range partition is created with
+// all the primary key columns except the auto-incrementing column, if present.
+TEST_F(PartitionTest, PartitionKeyWithAutoIncrementingColumn) {
+  // CREATE TABLE t (c1 STRING, c2 STRING),
+  // NON-UNIQUE PRIMARY KEY (c1)
+  // PARTITION BY HASH (c1) PARTITIONS 2;
+  Schema schema({ ColumnSchema("c1", STRING),
+                  ColumnSchema("auto_increment_id", INT64, false, false, true),
+                  ColumnSchema("c2", STRING) },
+                { ColumnId(0), ColumnId(1), ColumnId(2) }, 2);
+
+  PartitionSchemaPB schema_builder;
+  AddHashDimension(&schema_builder, { "c1" }, 2, 0);
+  PartitionSchema partition_schema;
+  ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, &partition_schema));
+
+  ASSERT_EQ(1, partition_schema.range_schema().column_ids.size());
+  ASSERT_EQ("HASH (c1) PARTITIONS 2, RANGE (c1)", partition_schema.DebugString(schema));
+}
 } // namespace kudu
diff --git a/src/kudu/common/partition.cc b/src/kudu/common/partition.cc
index 1669f3f5a..d51f836b2 100644
--- a/src/kudu/common/partition.cc
+++ b/src/kudu/common/partition.cc
@@ -270,11 +270,13 @@ Status PartitionSchema::FromPB(
     RETURN_NOT_OK(ExtractColumnIds(range_pb.columns(), schema,
                                    &partition_schema->range_schema_.column_ids));
   } else {
-    // Fill in the default range partition (PK columns).
+    // Fill in the default range partition (PK columns excluding the auto-incrementing column).
     // like the sorting above, this should only happen during table creation
     // while deserializing the user-provided partition schema.
     for (size_t column_idx = 0; column_idx < schema.num_key_columns(); ++column_idx) {
-      partition_schema->range_schema_.column_ids.push_back(schema.column_id(column_idx));
+      if (schema.auto_incrementing_col_idx() != column_idx) {
+        partition_schema->range_schema_.column_ids.push_back(schema.column_id(column_idx));
+      }
     }
   }
 

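For context, the effect of the patched loop can be reproduced outside Kudu with a minimal
standalone C++ sketch. Note that FakeSchema, its accessors, and DefaultRangeColumnIds below
are hypothetical stand-ins invented for illustration, not the real kudu::Schema or
PartitionSchema API; only the skip-the-auto-incrementing-column logic mirrors the patch above.

// fake_default_range.cc -- illustrative only; FakeSchema and its members are
// hypothetical stand-ins, not the real kudu::Schema API.
#include <cstddef>
#include <iostream>
#include <vector>

namespace {

// Reduced stand-in exposing just the three accessors the patched loop uses.
struct FakeSchema {
  std::vector<int> key_ids;  // column IDs of the primary key columns, in order
  size_t auto_inc_idx;       // index of the auto-incrementing key column,
                             // or key_ids.size() if the table has none

  size_t num_key_columns() const { return key_ids.size(); }
  int column_id(size_t idx) const { return key_ids[idx]; }
  size_t auto_incrementing_col_idx() const { return auto_inc_idx; }
};

// Mirrors the patched default-range branch of PartitionSchema::FromPB():
// every primary key column becomes a range column except the
// auto-incrementing one.
std::vector<int> DefaultRangeColumnIds(const FakeSchema& schema) {
  std::vector<int> column_ids;
  for (size_t column_idx = 0; column_idx < schema.num_key_columns(); ++column_idx) {
    if (schema.auto_incrementing_col_idx() != column_idx) {
      column_ids.push_back(schema.column_id(column_idx));
    }
  }
  return column_ids;
}

}  // anonymous namespace

int main() {
  // Same shape as the new test: PK is (c1, auto_increment_id), with the
  // auto-incrementing column at key index 1 and column ID 1.
  FakeSchema schema{{0, 1}, 1};
  for (int id : DefaultRangeColumnIds(schema)) {
    std::cout << "default range column id: " << id << "\n";  // prints only 0 (c1)
  }
  return 0;
}

Run with the same shape as the new test (primary key columns c1 and auto_increment_id, with
the auto-incrementing column at key index 1), the sketch keeps only c1's column ID, which
lines up with the DebugString expectation "HASH (c1) PARTITIONS 2, RANGE (c1)" asserted above.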