This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 239d1b719df Fix vacuum ao errors
239d1b719df is described below
commit 239d1b719df6c708bbf70cf2ff2b4019db7c3621
Author: Jinbao Chen <[email protected]>
AuthorDate: Sun Feb 1 00:42:41 2026 +0800

    Fix vacuum ao errors
---
src/backend/commands/vacuum.c | 6 ++---
src/backend/commands/vacuum_ao.c | 2 ++
src/test/regress/expected/freeze_aux_tables.out | 28 +++++++++++-----------
src/test/regress/expected/qp_misc_jiras.out | 2 +-
src/test/regress/expected/qp_query_execution.out | 18 +++++++-------
src/test/regress/expected/toast.out | 2 +-
.../regress/expected/workfile/sisc_sort_spill.out | 12 ++++------
7 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 660d5536692..244561532e2 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -2890,12 +2890,12 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
}
/* TODO: vacuum directory table's temp files */
- if (!is_appendoptimized && (params->options & VACOPT_PROCESS_MAIN))
+ if (params->options & VACOPT_PROCESS_MAIN)
{
/*
* Do the actual work --- either FULL or "lazy" vacuum
*/
- if (params->options & VACOPT_FULL)
+ if (!is_appendoptimized && (params->options & VACOPT_FULL))
{
ClusterParams cluster_params = {0};
@@ -2909,7 +2909,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
/* VACUUM FULL is now a variant of CLUSTER; see cluster.c */
cluster_rel(relid, InvalidOid, &cluster_params);
}
- else
+ else /* Heap vacuum, or AO/CO vacuum in a specific phase */
table_relation_vacuum(rel, params, bstrategy);
}
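Taken together, the two vacuum.c hunks stop excluding append-optimized relations from the VACOPT_PROCESS_MAIN block and instead restrict only the VACUUM FULL path to heap relations, so AO/CO relations now fall through to table_relation_vacuum(). A rough sketch of the resulting flow in vacuum_rel(), reconstructed from the hunks above (surrounding code and the cluster_params setup are elided):

    if (params->options & VACOPT_PROCESS_MAIN)
    {
        /* Do the actual work --- either FULL or "lazy" vacuum */
        if (!is_appendoptimized && (params->options & VACOPT_FULL))
        {
            ClusterParams cluster_params = {0};

            /* ... populate cluster_params (elided) ... */

            /* VACUUM FULL is now a variant of CLUSTER; see cluster.c */
            cluster_rel(relid, InvalidOid, &cluster_params);
        }
        else
        {
            /* Heap lazy vacuum, or an AO/CO vacuum phase handled by the table AM */
            table_relation_vacuum(rel, params, bstrategy);
        }
    }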
diff --git a/src/backend/commands/vacuum_ao.c b/src/backend/commands/vacuum_ao.c
index 9d7f8f779e3..3a2dc7e6627 100644
--- a/src/backend/commands/vacuum_ao.c
+++ b/src/backend/commands/vacuum_ao.c
@@ -643,6 +643,7 @@ vacuum_appendonly_index(Relation indexRelation,
ivinfo.num_heap_tuples = aoRelation->rd_rel->reltuples;
ivinfo.estimated_count = true;
ivinfo.strategy = bstrategy;
+ ivinfo.heaprel = aoRelation;
/* Do bulk deletion */
stats = index_bulk_delete(&ivinfo, NULL, appendonly_tid_reaped,
@@ -816,6 +817,7 @@ scan_index(Relation indrel, Relation aorel, int elevel, BufferAccessStrategy vac
ivinfo.num_heap_tuples = aorel->rd_rel->reltuples;
ivinfo.estimated_count = true;
ivinfo.strategy = vac_strategy;
+ ivinfo.heaprel = aorel;
/* Do post-VACUUM cleanup */
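Both vacuum_ao.c hunks make the same one-line change: they set IndexVacuumInfo.heaprel to the append-optimized table that owns the index before handing the struct to the index AM. The merged PostgreSQL code carries a heaprel field in IndexVacuumInfo that index bulk-delete/cleanup may rely on, so leaving it unset in these AO-specific paths is presumably the source of the vacuum errors named in the commit title. A minimal sketch of the pattern, using only the names visible in the vacuum_appendonly_index() hunk context (other fields and arguments elided; scan_index() does the same with aorel before index_vacuum_cleanup()):

    IndexVacuumInfo ivinfo;

    ivinfo.num_heap_tuples = aoRelation->rd_rel->reltuples;
    ivinfo.estimated_count = true;
    ivinfo.strategy = bstrategy;
    ivinfo.heaprel = aoRelation;   /* the fix: the table this index belongs to */

    /* Do bulk deletion: index_bulk_delete(&ivinfo, ...) now sees a valid heaprel */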
diff --git a/src/test/regress/expected/freeze_aux_tables.out b/src/test/regress/expected/freeze_aux_tables.out
index a3fd1c36e19..b5b920003fc 100644
--- a/src/test/regress/expected/freeze_aux_tables.out
+++ b/src/test/regress/expected/freeze_aux_tables.out
@@ -159,7 +159,7 @@ select segid = -1 as is_master, relname, classify_age(age) from aux_rel_ages('te
group by segid = -1, relname, classify_age(age);
is_master | relname | classify_age
-----------+-----------------+--------------
- f | test_table_heap | young
+ f | test_table_heap | zero
t | test_table_heap | very young
(2 rows)
@@ -177,38 +177,38 @@ select segid = -1 as is_master, relname, classify_age(age) from aux_rel_ages('te
group by segid = -1, relname, classify_age(age);
is_master | relname | classify_age
-----------+--------------------+--------------
- f | pg_aoblkdir_<oid> | young
- f | pg_aovisimap_<oid> | young
+ t | pg_aoblkdir_<oid> | very young
+ f | pg_aoblkdir_<oid> | zero
t | pg_aovisimap_<oid> | very young
+ f | pg_aovisimap_<oid> | zero
t | pg_aoseg_<oid> | very young
- f | pg_aoseg_<oid> | young
- t | pg_aoblkdir_<oid> | very young
+ f | pg_aoseg_<oid> | zero
(6 rows)
select segid = -1 as is_master, relname, classify_age(age) from aux_rel_ages('test_table_ao_with_toast')
group by segid = -1, relname, classify_age(age);
is_master | relname | classify_age
-----------+--------------------+--------------
- f | pg_aoblkdir_<oid> | young
- f | pg_toast_<oid> | young
+ t | pg_aoblkdir_<oid> | very young
+ f | pg_aoblkdir_<oid> | zero
t | pg_toast_<oid> | very young
- f | pg_aovisimap_<oid> | young
t | pg_aovisimap_<oid> | very young
+ f | pg_aovisimap_<oid> | zero
t | pg_aoseg_<oid> | very young
- f | pg_aoseg_<oid> | young
- t | pg_aoblkdir_<oid> | very young
+ f | pg_toast_<oid> | zero
+ f | pg_aoseg_<oid> | zero
(8 rows)
select segid = -1 as is_master, relname, classify_age(age) from aux_rel_ages('test_table_co')
group by segid = -1, relname, classify_age(age);
is_master | relname | classify_age
-----------+--------------------+--------------
+ t | pg_aoblkdir_<oid> | very young
t | pg_aocsseg_<oid> | very young
- f | pg_aocsseg_<oid> | young
- f | pg_aoblkdir_<oid> | young
- f | pg_aovisimap_<oid> | young
+ f | pg_aoblkdir_<oid> | zero
t | pg_aovisimap_<oid> | very young
- t | pg_aoblkdir_<oid> | very young
+ f | pg_aovisimap_<oid> | zero
+ f | pg_aocsseg_<oid> | zero
(6 rows)
-- Repeat the tests on a table that has been inserted to, but all the rows
diff --git a/src/test/regress/expected/qp_misc_jiras.out b/src/test/regress/expected/qp_misc_jiras.out
index 53b74986f4b..b07f5fc9706 100644
--- a/src/test/regress/expected/qp_misc_jiras.out
+++ b/src/test/regress/expected/qp_misc_jiras.out
@@ -4646,7 +4646,7 @@ drop table qp_misc_jiras.tbl7285_axg;
show autovacuum;
autovacuum
------------
- off
+ on
(1 row)
set autovacuum=on;
diff --git a/src/test/regress/expected/qp_query_execution.out b/src/test/regress/expected/qp_query_execution.out
index 732399558d1..27b0c80ef4b 100644
--- a/src/test/regress/expected/qp_query_execution.out
+++ b/src/test/regress/expected/qp_query_execution.out
@@ -163,7 +163,7 @@ analyze bar;
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 desc limit 10;
@@ -346,7 +346,7 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t
select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 desc limit 10;
@@ -441,7 +441,7 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t i
select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6;', 'Hash Right Join', 'Hash Left Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = 6 order by 1, 2 asc limit 10;
@@ -489,7 +489,7 @@ select foo_p.k, foo_p.t from foo_p left outer join bar on foo_p.k = bar.k where
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00;', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = 6.00 order by 1, 2 desc limit 10;
@@ -544,7 +544,7 @@ select abbp.k, abbp.t from abbp left outer join b on abbp.k = b.k where abbp.t
select qx_count_operator('select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = E''6SOME NUMBER''', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select abbp.b, abbp.t from abbp left outer join b on abbp.a = b.k where abbp.t is not null and abbp.a = '6SOME NUMBER' order by 1, 2 asc limit 10;
@@ -600,7 +600,7 @@ select foo_p.k, foo_p.t from foo_p left outer join bar_p on foo_p.k = bar_p.k w
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
@@ -697,7 +697,7 @@ select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.k a
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6;', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar_p on foo_p.a = bar_p.a where foo_p.t is not null and foo_p.a = 6 order by 1, 2 asc limit 10;
@@ -731,7 +731,7 @@ analyze bar;
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1];', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = (array[1])[1] order by 1, 2 desc limit 10;
@@ -753,7 +753,7 @@ create function mytest(integer) returns integer as 'select $1/100' language sql;
select qx_count_operator('select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100);', 'Hash Right Join', 'Hash Right Join');
qx_count_operator
-------------------
- 1
+ 0
(1 row)
select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k where foo_p.t is not null and foo_p.a = mytest(100) order by 1, 2 asc limit 10;
diff --git a/src/test/regress/expected/toast.out b/src/test/regress/expected/toast.out
index 7b5fe829138..b86c2f44925 100644
--- a/src/test/regress/expected/toast.out
+++ b/src/test/regress/expected/toast.out
@@ -149,7 +149,7 @@ select gp_segment_id, get_rel_toast_count('toastable_heap') from gp_dist_random(
select gp_segment_id, get_rel_toast_count('toastable_ao') from gp_dist_random('gp_id') order by gp_segment_id;
gp_segment_id | get_rel_toast_count
---------------+---------------------
- 0 | 14
+ 0 | 0
1 | 0
2 | 0
(3 rows)
diff --git a/src/test/regress/expected/workfile/sisc_sort_spill.out b/src/test/regress/expected/workfile/sisc_sort_spill.out
index 55365599701..8386b918b28 100644
--- a/src/test/regress/expected/workfile/sisc_sort_spill.out
+++ b/src/test/regress/expected/workfile/sisc_sort_spill.out
@@ -58,8 +58,7 @@ select * from sisc_sort_spill.is_workfile_created('explain (analyze, verbose)
is_workfile_created
---------------------
1
- 1
-(2 rows)
+(1 row)
select * from sisc_sort_spill.is_workfile_created('explain (analyze, verbose)
with ctesisc as (select * from testsisc order by i2)
@@ -70,8 +69,7 @@ limit 50000;');
is_workfile_created
---------------------
1
- 1
-(2 rows)
+(1 row)
select avg(i3) from (
with ctesisc as (select * from testsisc order by i2)
@@ -93,8 +91,7 @@ select * from sisc_sort_spill.is_workfile_created('explain (analyze, verbose)
is_workfile_created
---------------------
1
- 1
-(2 rows)
+(1 row)
select * from sisc_sort_spill.is_workfile_created('explain (analyze, verbose)
with ctesisc as (select * from testsisc order by i2)
@@ -105,8 +102,7 @@ limit 50000;');
is_workfile_created
---------------------
1
- 1
-(2 rows)
+(1 row)
reset max_parallel_workers_per_gather;
drop schema sisc_sort_spill cascade;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]