This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
new 0ecb84c6384 branch-2.1: [Fix](partial update) Fix incorrect result
when partial update include delete sign columns #46194 (#46543)
0ecb84c6384 is described below
commit 0ecb84c6384616621379c61abc6212001348d328
Author: bobhan1 <[email protected]>
AuthorDate: Thu Jan 9 08:14:46 2025 +0800
branch-2.1: [Fix](partial update) Fix incorrect result when partial update
include delete sign columns #46194 (#46543)
pick https://github.com/apache/doris/pull/46194
---
be/src/olap/rowset/segment_v2/segment_writer.cpp | 37 +++---
.../rowset/segment_v2/vertical_segment_writer.cpp | 41 +++----
be/src/olap/tablet.cpp | 17 ++-
be/src/olap/tablet.h | 4 +-
...t_partial_update_with_delete_col_in_publish.out | 15 +++
.../test_partial_update_merge_type.out | 32 ++---
.../test_partial_update_seq_col_delete.out | Bin 1530 -> 1533 bytes
.../test_partial_update_seq_type_delete.out | Bin 2967 -> 2981 bytes
.../partial_update/test_with_delete_sign_col.out | 133 +++++++++++++++++++++
.../partial_update/with_delete1.csv | 6 +
...artial_update_with_delete_col_in_publish.groovy | 92 ++++++++++++++
.../test_with_delete_sign_col.groovy | 98 +++++++++++++++
12 files changed, 415 insertions(+), 60 deletions(-)
diff --git a/be/src/olap/rowset/segment_v2/segment_writer.cpp
b/be/src/olap/rowset/segment_v2/segment_writer.cpp
index 7e5a51f55ce..00aec4d533a 100644
--- a/be/src/olap/rowset/segment_v2/segment_writer.cpp
+++ b/be/src/olap/rowset/segment_v2/segment_writer.cpp
@@ -657,15 +657,21 @@ Status
SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
bool has_default_or_nullable,
const size_t& segment_start_pos,
const vectorized::Block* block) {
- if constexpr (!std::is_same_v<ExecEnv::Engine, StorageEngine>) {
- // TODO(plat1ko): cloud mode
- return Status::NotSupported("fill_missing_columns");
- }
auto tablet = static_cast<Tablet*>(_tablet.get());
// create old value columns
const auto& cids_missing =
_opts.rowset_ctx->partial_update_info->missing_cids;
+ auto cids_to_read = cids_missing;
auto old_value_block = _tablet_schema->create_block_by_cids(cids_missing);
CHECK_EQ(cids_missing.size(), old_value_block.columns());
+ // always read delete sign column from historical data
+ if (const vectorized::ColumnWithTypeAndName* old_delete_sign_column =
+ old_value_block.try_get_by_name(DELETE_SIGN);
+ old_delete_sign_column == nullptr) {
+ auto del_col_cid = _tablet_schema->field_index(DELETE_SIGN);
+ cids_to_read.emplace_back(del_col_cid);
+
old_value_block.swap(_tablet_schema->create_block_by_cids(cids_to_read));
+ }
+
bool has_row_column = _tablet_schema->store_row_column();
// record real pos, key is input line num, value is old_block line num
std::map<uint32_t, uint32_t> read_index;
@@ -681,7 +687,7 @@ Status
SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
}
if (has_row_column) {
auto st = tablet->fetch_value_through_row_column(
- rowset, *_tablet_schema, seg_it.first, rids,
cids_missing, old_value_block);
+ rowset, *_tablet_schema, seg_it.first, rids,
cids_to_read, old_value_block);
if (!st.ok()) {
LOG(WARNING) << "failed to fetch value through row column";
return st;
@@ -690,7 +696,7 @@ Status
SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
}
auto mutable_old_columns = old_value_block.mutate_columns();
for (size_t cid = 0; cid < mutable_old_columns.size(); ++cid) {
- TabletColumn tablet_column =
_tablet_schema->column(cids_missing[cid]);
+ TabletColumn tablet_column =
_tablet_schema->column(cids_to_read[cid]);
auto st = tablet->fetch_value_by_rowids(rowset, seg_it.first,
rids, tablet_column,
mutable_old_columns[cid]);
// set read value to output block
@@ -715,17 +721,14 @@ Status
SegmentWriter::fill_missing_columns(vectorized::MutableColumns& mutable_f
delete_sign_column_data = delete_sign_col.get_data().data();
}
- if (has_default_or_nullable || delete_sign_column_data != nullptr) {
- for (auto i = 0; i < cids_missing.size(); ++i) {
- const auto& column = _tablet_schema->column(cids_missing[i]);
- if (column.has_default_value()) {
- const auto& default_value =
-
_opts.rowset_ctx->partial_update_info->default_values[i];
- vectorized::ReadBuffer
rb(const_cast<char*>(default_value.c_str()),
- default_value.size());
-
RETURN_IF_ERROR(old_value_block.get_by_position(i).type->from_string(
- rb, mutable_default_value_columns[i].get()));
- }
+ for (auto i = 0; i < cids_missing.size(); ++i) {
+ const auto& column = _tablet_schema->column(cids_missing[i]);
+ if (column.has_default_value()) {
+ const auto& default_value =
_opts.rowset_ctx->partial_update_info->default_values[i];
+ vectorized::ReadBuffer rb(const_cast<char*>(default_value.c_str()),
+ default_value.size());
+
RETURN_IF_ERROR(old_value_block.get_by_position(i).type->from_string(
+ rb, mutable_default_value_columns[i].get()));
}
}
diff --git a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp
b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp
index 1be1f1ec180..8898c08aa80 100644
--- a/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp
+++ b/be/src/olap/rowset/segment_v2/vertical_segment_writer.cpp
@@ -346,11 +346,6 @@ Status
VerticalSegmentWriter::_partial_update_preconditions_check(size_t row_pos
// 3. set columns to data convertor and then write all columns
Status VerticalSegmentWriter::_append_block_with_partial_content(RowsInBlock&
data,
vectorized::Block& full_block) {
- if constexpr (!std::is_same_v<ExecEnv::Engine, StorageEngine>) {
- // TODO(plat1ko): CloudStorageEngine
- return Status::NotSupported("append_block_with_partial_content");
- }
-
RETURN_IF_ERROR(_partial_update_preconditions_check(data.row_pos));
auto tablet = static_cast<Tablet*>(_tablet.get());
@@ -600,15 +595,20 @@ Status VerticalSegmentWriter::_fill_missing_columns(
vectorized::MutableColumns& mutable_full_columns,
const std::vector<bool>& use_default_or_null_flag, bool
has_default_or_nullable,
const size_t& segment_start_pos, const vectorized::Block* block) {
- if constexpr (!std::is_same_v<ExecEnv::Engine, StorageEngine>) {
- // TODO(plat1ko): CloudStorageEngine
- return Status::NotSupported("fill_missing_columns");
- }
auto tablet = static_cast<Tablet*>(_tablet.get());
// create old value columns
const auto& missing_cids =
_opts.rowset_ctx->partial_update_info->missing_cids;
+ auto cids_to_read = missing_cids;
auto old_value_block = _tablet_schema->create_block_by_cids(missing_cids);
CHECK_EQ(missing_cids.size(), old_value_block.columns());
+ // always read delete sign column from historical data
+ if (const vectorized::ColumnWithTypeAndName* old_delete_sign_column =
+ old_value_block.try_get_by_name(DELETE_SIGN);
+ old_delete_sign_column == nullptr) {
+ auto del_col_cid = _tablet_schema->field_index(DELETE_SIGN);
+ cids_to_read.emplace_back(del_col_cid);
+
old_value_block.swap(_tablet_schema->create_block_by_cids(cids_to_read));
+ }
auto mutable_old_columns = old_value_block.mutate_columns();
bool has_row_column = _tablet_schema->store_row_column();
// record real pos, key is input line num, value is old_block line num
@@ -625,7 +625,7 @@ Status VerticalSegmentWriter::_fill_missing_columns(
}
if (has_row_column) {
auto st = tablet->fetch_value_through_row_column(
- rowset, *_tablet_schema, seg_it.first, rids,
missing_cids, old_value_block);
+ rowset, *_tablet_schema, seg_it.first, rids,
cids_to_read, old_value_block);
if (!st.ok()) {
LOG(WARNING) << "failed to fetch value through row column";
return st;
@@ -633,7 +633,7 @@ Status VerticalSegmentWriter::_fill_missing_columns(
continue;
}
for (size_t cid = 0; cid < mutable_old_columns.size(); ++cid) {
- TabletColumn tablet_column =
_tablet_schema->column(missing_cids[cid]);
+ TabletColumn tablet_column =
_tablet_schema->column(cids_to_read[cid]);
auto st = tablet->fetch_value_by_rowids(rowset, seg_it.first,
rids, tablet_column,
mutable_old_columns[cid]);
// set read value to output block
@@ -657,17 +657,14 @@ Status VerticalSegmentWriter::_fill_missing_columns(
delete_sign_column_data = delete_sign_col.get_data().data();
}
- if (has_default_or_nullable || delete_sign_column_data != nullptr) {
- for (auto i = 0; i < missing_cids.size(); ++i) {
- const auto& column = _tablet_schema->column(missing_cids[i]);
- if (column.has_default_value()) {
- const auto& default_value =
-
_opts.rowset_ctx->partial_update_info->default_values[i];
- vectorized::ReadBuffer
rb(const_cast<char*>(default_value.c_str()),
- default_value.size());
-
RETURN_IF_ERROR(old_value_block.get_by_position(i).type->from_string(
- rb, mutable_default_value_columns[i].get()));
- }
+ for (auto i = 0; i < missing_cids.size(); ++i) {
+ const auto& column = _tablet_schema->column(missing_cids[i]);
+ if (column.has_default_value()) {
+ const auto& default_value =
_opts.rowset_ctx->partial_update_info->default_values[i];
+ vectorized::ReadBuffer rb(const_cast<char*>(default_value.c_str()),
+ default_value.size());
+
RETURN_IF_ERROR(old_value_block.get_by_position(i).type->from_string(
+ rb, mutable_default_value_columns[i].get()));
}
}
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index af952dbec34..948d8b4b484 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -3290,7 +3290,7 @@ Status Tablet::generate_new_block_for_partial_update(
// read current rowset first, if a row in the current rowset has delete
sign mark
// we don't need to read values from old block
RETURN_IF_ERROR(read_columns_by_plan(rowset_schema, update_cids,
read_plan_update,
- rsid_to_rowset, update_block,
&read_index_update));
+ rsid_to_rowset, update_block,
&read_index_update, false));
size_t update_rows = read_index_update.size();
for (auto i = 0; i < update_cids.size(); ++i) {
@@ -3311,7 +3311,7 @@ Status Tablet::generate_new_block_for_partial_update(
// rowid in the final block(start from 0, increase, may not continuous
because we skip to read some rows) -> rowid to read in old_block
std::map<uint32_t, uint32_t> read_index_old;
RETURN_IF_ERROR(read_columns_by_plan(rowset_schema, missing_cids,
read_plan_ori, rsid_to_rowset,
- old_block, &read_index_old,
new_block_delete_signs));
+ old_block, &read_index_old, true,
new_block_delete_signs));
size_t old_rows = read_index_old.size();
const auto* __restrict old_block_delete_signs =
get_delete_sign_column_data(old_block, old_rows);
@@ -3375,12 +3375,23 @@ Status Tablet::generate_new_block_for_partial_update(
// read columns by read plan
// read_index: ori_pos-> block_idx
Status Tablet::read_columns_by_plan(TabletSchemaSPtr tablet_schema,
- const std::vector<uint32_t> cids_to_read,
+ std::vector<uint32_t> cids_to_read,
const PartialUpdateReadPlan& read_plan,
const std::map<RowsetId, RowsetSharedPtr>&
rsid_to_rowset,
vectorized::Block& block,
std::map<uint32_t, uint32_t>* read_index,
+ bool force_read_old_delete_signs,
const signed char* __restrict skip_map) {
+ if (force_read_old_delete_signs) {
+ // always read delete sign column from historical data
+ if (const vectorized::ColumnWithTypeAndName* old_delete_sign_column =
+ block.try_get_by_name(DELETE_SIGN);
+ old_delete_sign_column == nullptr) {
+ auto del_col_cid = tablet_schema->field_index(DELETE_SIGN);
+ cids_to_read.emplace_back(del_col_cid);
+ block.swap(tablet_schema->create_block_by_cids(cids_to_read));
+ }
+ }
bool has_row_column = tablet_schema->store_row_column();
auto mutable_columns = block.mutate_columns();
size_t read_idx = 0;
diff --git a/be/src/olap/tablet.h b/be/src/olap/tablet.h
index e22213ed79a..4da655c38ab 100644
--- a/be/src/olap/tablet.h
+++ b/be/src/olap/tablet.h
@@ -486,11 +486,11 @@ public:
Status calc_delete_bitmap_between_segments(
RowsetSharedPtr rowset, const
std::vector<segment_v2::SegmentSharedPtr>& segments,
DeleteBitmapPtr delete_bitmap);
- Status read_columns_by_plan(TabletSchemaSPtr tablet_schema,
- const std::vector<uint32_t> cids_to_read,
+ Status read_columns_by_plan(TabletSchemaSPtr tablet_schema,
std::vector<uint32_t> cids_to_read,
const PartialUpdateReadPlan& read_plan,
const std::map<RowsetId, RowsetSharedPtr>&
rsid_to_rowset,
vectorized::Block& block, std::map<uint32_t,
uint32_t>* read_index,
+ bool force_read_old_delete_signs,
const signed char* __restrict skip_map =
nullptr);
void prepare_to_read(const RowLocation& row_location, size_t pos,
PartialUpdateReadPlan* read_plan);
diff --git
a/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out
b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out
new file mode 100644
index 00000000000..39b43afd898
--- /dev/null
+++
b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.out
@@ -0,0 +1,15 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !sql --
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+
+-- !sql --
+1 1 1 987 987
+2 \N \N 987 987
+3 3 3 3 3
+4 -1 -1 987 987
+5 \N \N 987 987
+
diff --git
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
index 04e4b07f2dc..d21693cda10 100644
---
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
+++
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_merge_type.out
@@ -28,7 +28,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !sql_2_1 --
0 0 0 0
@@ -36,7 +36,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !sql_2_2 --
0 0 0 0
@@ -90,7 +90,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !inspect --
0 0 0 0 1 0
@@ -109,7 +109,7 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
-- !sql_4_1 --
0 0 0 0
@@ -117,7 +117,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !inspect --
0 0 0 0 1 0
@@ -138,7 +138,7 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
-- !sql_4_2 --
0 0 0 0
@@ -166,8 +166,8 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
-11 \N 888 \N 5 1
+11 \N \N \N 3 0
+11 \N \N \N 5 1
-- !sql --
0 0 0 0
@@ -198,7 +198,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !sql_2_1 --
0 0 0 0
@@ -206,7 +206,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !sql_2_2 --
0 0 0 0
@@ -260,7 +260,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !inspect --
0 0 0 0 1 0
@@ -279,7 +279,7 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
-- !sql_4_1 --
0 0 0 0
@@ -287,7 +287,7 @@
7 7 7 7
8 8 8 8
10 \N 999 \N
-11 \N 888 \N
+11 \N \N \N
-- !inspect --
0 0 0 0 1 0
@@ -308,7 +308,7 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
+11 \N \N \N 3 0
-- !sql_4_2 --
0 0 0 0
@@ -336,6 +336,6 @@
8 8 8 8 1 0
10 \N 999 \N 2 0
11 \N 888 \N 2 1
-11 \N 888 \N 3 0
-11 \N 888 \N 5 1
+11 \N \N \N 3 0
+11 \N \N \N 5 1
diff --git
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
index afa7ccfc9bb..ad704cac251 100644
Binary files
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
and
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_col_delete.out
differ
diff --git
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
index 05f0c5dab4e..8621761c3a6 100644
Binary files
a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
and
b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_seq_type_delete.out
differ
diff --git
a/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out
b/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out
new file mode 100644
index 00000000000..66960554a61
--- /dev/null
+++
b/regression-test/data/unique_with_mow_p0/partial_update/test_with_delete_sign_col.out
@@ -0,0 +1,133 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_1 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+2 2 2 2 2
+3 3 3 3 3
+4 4 4 4 4
+5 5 5 5 5
+6 6 6 6 6
+7 7 7 7 7
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 10 10
+
+-- !sql_2 --
+0 0 0 0 0
+1 1 1 1 1
+4 4 4 4 4
+5 5 5 55 55
+8 8 8 8 8
+9 9 9 9 9
+10 10 10 1010 1010
+
+-- !sql_1 --
+0 0 0 0 0
+1 11 11 1 1
+2 22 22 888 777
+3 33 33 888 777
+4 4 4 4 4
+5 5 5 55 55
+6 66 66 888 777
+8 8 8 8 8
+10 10 10 1010 1010
+
diff --git
a/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv
b/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv
new file mode 100644
index 00000000000..c13ead6ffad
--- /dev/null
+++ b/regression-test/data/unique_with_mow_p0/partial_update/with_delete1.csv
@@ -0,0 +1,6 @@
+2,22,22,1
+3,33,33,1
+5,55,55,0
+6,66,66,1
+7,77,77,1
+10,1010,1010,0
diff --git
a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy
b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy
new file mode 100644
index 00000000000..3d7511f7f57
--- /dev/null
+++
b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_with_delete_col_in_publish.groovy
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.Assert
+import java.util.concurrent.TimeUnit
+import org.awaitility.Awaitility
+
+suite("test_partial_update_with_delete_col_in_publish", "nonConcurrent") {
+
+ def tableName = "test_partial_update_with_delete_col_in_publish"
+ sql """ DROP TABLE IF EXISTS ${tableName} force;"""
+ sql """ CREATE TABLE ${tableName} (
+ `k` int(11) NULL,
+ `v1` BIGINT NULL,
+ `v2` BIGINT NULL,
+ `v3` BIGINT NULL,
+ `v4` BIGINT NULL,
+ ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1
+ PROPERTIES(
+ "replication_num" = "1",
+ "enable_unique_key_merge_on_write" = "true",
+ "light_schema_change" = "true",
+ "store_row_column" = "false"); """
+ def show_res = sql "show create table ${tableName}"
+ sql """insert into ${tableName}
values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);"""
+ qt_sql "select * from ${tableName} order by k;"
+
+ def enable_publish_spin_wait = {
+
GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait")
+ }
+
+ def enable_block_in_publish = {
+
GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+ }
+
+ def disable_block_in_publish = {
+
GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+ }
+
+ try {
+ GetDebugPoint().clearDebugPointsForAllFEs()
+ GetDebugPoint().clearDebugPointsForAllBEs()
+
+ // block the partial update in publish phase
+ enable_publish_spin_wait()
+ enable_block_in_publish()
+
+ def threads = []
+
+ threads << Thread.start {
+ sql "set enable_unique_key_partial_update=true;"
+ sql "set enable_insert_strict=false"
+ sql "sync;"
+ sql "insert into ${tableName}(k,v1,v2,__DORIS_DELETE_SIGN__)
values(2,222,222,1),(4,-1,-1,0),(5,555,555,1);"
+ }
+
+ Thread.sleep(500)
+
+ threads << Thread.start {
+ sql "set enable_unique_key_partial_update=true;"
+ sql "sync;"
+ sql "insert into ${tableName}(k,v3,v4)
values(1,987,987),(2,987,987),(4,987,987),(5,987,987);"
+ }
+
+ Thread.sleep(500)
+
+ disable_block_in_publish()
+ threads.each { t -> t.join() }
+
+ qt_sql "select * from ${tableName} order by k;"
+ } catch(Exception e) {
+ logger.info(e.getMessage())
+ throw e
+ } finally {
+ GetDebugPoint().clearDebugPointsForAllFEs()
+ GetDebugPoint().clearDebugPointsForAllBEs()
+ }
+}
diff --git
a/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy
b/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy
new file mode 100644
index 00000000000..ee2a56f7ee5
--- /dev/null
+++
b/regression-test/suites/unique_with_mow_p0/partial_update/test_with_delete_sign_col.groovy
@@ -0,0 +1,98 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_with_delete_sign_col", "p0") {
+
+ String db = context.config.getDbNameByFile(context.file)
+ sql "select 1;" // to create database
+
+ def inspectRows = { sqlStr ->
+ sql "set skip_delete_sign=true;"
+ sql "set skip_delete_bitmap=true;"
+ sql "sync"
+ qt_inspect sqlStr
+ sql "set skip_delete_sign=false;"
+ sql "set skip_delete_bitmap=false;"
+ sql "sync"
+ }
+
+
+ for (def use_row_store : [false, true]) {
+ logger.info("current params: use_row_store: ${use_row_store}")
+
+ connect( context.config.jdbcUser, context.config.jdbcPassword,
context.config.jdbcUrl) {
+ sql "use ${db};"
+ def table1 = "test_with_delete_sign_col"
+ sql "DROP TABLE IF EXISTS ${table1} FORCE;"
+ sql """ CREATE TABLE IF NOT EXISTS ${table1} (
+ `k1` int NOT NULL,
+ `c1` int,
+ `c2` int default "999",
+ `c3` int default "888",
+ `c4` int default "777"
+ )UNIQUE KEY(k1)
+ DISTRIBUTED BY HASH(k1) BUCKETS 1
+ PROPERTIES (
+ "enable_mow_light_delete" = "false",
+ "disable_auto_compaction" = "true",
+ "replication_num" = "1"); """
+
+ sql """insert into ${table1} select
number,number,number,number,number from numbers("number"="11");"""
+ qt_sql_1 "select * from ${table1} order by k1;"
+
+ sql "set enable_unique_key_partial_update=true;"
+ sql "set enable_insert_strict=false;"
+ sql "sync;"
+
+ sql "insert into ${table1}(k1,c3,c4,__DORIS_DELETE_SIGN__)
values(2,22,22,1),(3,33,33,1),(5,55,55,0),(6,66,66,1),(7,77,77,1),(10,1010,1010,0);"
+ qt_sql_1 "select * from ${table1} order by k1;"
+
+ sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__)
values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
+ sql "set enable_unique_key_partial_update=false;"
+ sql "set enable_insert_strict=true;"
+ sql "sync;"
+ qt_sql_1 "select * from ${table1} order by k1;"
+
+
+ sql "truncate table ${table1};"
+ sql """insert into ${table1} select
number,number,number,number,number from numbers("number"="11");"""
+ qt_sql_2 "select * from ${table1} order by k1;"
+
+ streamLoad {
+ table "${table1}"
+ set 'column_separator', ','
+ set 'format', 'csv'
+ set 'columns', 'k1,c3,c4,del'
+ set 'partial_columns', 'true'
+ set 'merge_type', 'MERGE'
+ set 'delete', 'del=1'
+ file 'with_delete1.csv'
+ time 10000
+ }
+ qt_sql_2 "select * from ${table1} order by k1;"
+ sql "set enable_unique_key_partial_update=true;"
+ sql "set enable_insert_strict=false;"
+ sql "sync;"
+ sql "insert into ${table1}(k1,c1,c2,__DORIS_DELETE_SIGN__)
values(1,11,11,0),(2,22,22,0),(3,33,33,0),(6,66,66,0),(9,99,99,1);"
+ sql "set enable_unique_key_partial_update=false;"
+ sql "set enable_insert_strict=true;"
+ sql "sync;"
+ qt_sql_1 "select * from ${table1} order by k1;"
+ }
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]