This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new 33e7adbbfe7 Fix some answer file for pax
33e7adbbfe7 is described below
commit 33e7adbbfe702c36e5ce85af435a187bacefcafb
Author: Jinbao Chen <[email protected]>
AuthorDate: Sat Mar 28 23:48:27 2026 -0400
Fix some answer file for pax
---
.../regress/expected/alter_distribution_policy.out | 40 ++--
.../test/regress/expected/alter_table_aocs2.out | 8 +-
.../src/test/regress/expected/alter_table_gp.out | 12 +-
.../src/test/regress/expected/am_encoding.out | 4 +-
.../src/test/regress/expected/aqumv.out | 132 ++++++-----
.../src/test/regress/expected/autostats.out | 2 +-
.../src/test/regress/expected/bfv_aggregate.out | 5 +-
.../src/test/regress/expected/bfv_planner.out | 1 +
.../src/test/regress/expected/bfv_temp.out | 2 +-
.../test/regress/expected/column_compression.out | 7 +-
.../src/test/regress/expected/combocid_gp.out | 10 +-
.../src/test/regress/expected/createdb.out | 3 +-
.../test/regress/expected/default_tablespace.out | 4 -
.../regress/expected/distributed_transactions.out | 255 ++------------------
.../pax_storage/src/test/regress/expected/dsp.out | 26 +--
.../src/test/regress/expected/event_trigger.out | 66 +-----
.../src/test/regress/expected/gangsize_1.out | 36 ++-
.../src/test/regress/expected/gp_explain.out | 2 +-
.../test/regress/expected/gp_runtime_filter.out | 256 ---------------------
.../src/test/regress/expected/gp_tablespace.out | 2 -
.../src/test/regress/expected/gpcopy.out | 1 -
.../src/test/regress/expected/gporca.out | 10 +-
.../src/test/regress/expected/oid_wraparound.out | 16 +-
.../src/test/regress/expected/partition.out | 242 +++++++++++++------
.../test/regress/expected/partition_locking.out | 80 ++++++-
.../src/test/regress/expected/pg_stat.out | 4 +-
.../test/regress/expected/pgstat_qd_tabstat.out | 2 +-
.../src/test/regress/expected/rowsecurity.out | 5 +-
.../src/test/regress/expected/temp_tablespaces.out | 4 -
.../src/test/regress/greenplum_schedule | 2 +-
.../src/test/regress/sql/am_encoding.sql | 4 +-
.../test/regress/sql/distributed_transactions.sql | 139 ++---------
.../src/test/regress/sql/gp_runtime_filter.sql | 150 ------------
.../pax_storage/src/test/regress/sql/gpcopy.sql | 2 -
.../src/test/regress/sql/pgstat_qd_tabstat.sql | 2 +-
.../src/test/regress/sql/resource_queue.sql | 20 --
.../pax_storage/src/test/regress/test_dbconn.py | 30 +++
src/include/utils/tarrable.h | 2 +-
38 files changed, 500 insertions(+), 1088 deletions(-)
diff --git
a/contrib/pax_storage/src/test/regress/expected/alter_distribution_policy.out
b/contrib/pax_storage/src/test/regress/expected/alter_distribution_policy.out
index 2e019cf7f8c..be070815b61 100644
---
a/contrib/pax_storage/src/test/regress/expected/alter_distribution_policy.out
+++
b/contrib/pax_storage/src/test/regress/expected/alter_distribution_policy.out
@@ -171,12 +171,12 @@ ERROR: permission denied: "pg_class" is a system catalog
create table atsdb (i int, j text) distributed by (j);
insert into atsdb select i, i::text from generate_series(1, 10) i;
alter table atsdb set with(appendonly = true);
+ERROR: PAX not allow swap relation files for different AM (cluster.c:1535)
select relname, segrelid != 0, reloptions from pg_class, pg_appendonly where
pg_class.oid =
'atsdb'::regclass and relid = pg_class.oid;
relname | ?column? | reloptions
---------+----------+------------
- atsdb | t |
-(1 row)
+(0 rows)
select * from atsdb;
i | j
@@ -316,12 +316,12 @@ select * from distcheck where rel = 'atsdb';
alter table atsdb drop column n;
alter table atsdb set with(appendonly = true, compresslevel = 3);
+ERROR: PAX not allow swap relation files for different AM (cluster.c:1535)
select relname, segrelid != 0, reloptions from pg_class, pg_appendonly where
pg_class.oid =
'atsdb'::regclass and relid = pg_class.oid;
- relname | ?column? | reloptions
----------+----------+-------------------
- atsdb | t | {compresslevel=3}
-(1 row)
+ relname | ?column? | reloptions
+---------+----------+------------
+(0 rows)
select * from distcheck where rel = 'atsdb';
rel | attname
@@ -443,10 +443,9 @@ select * from distcheck where rel = 'atsdb';
select relname, segrelid != 0, reloptions from pg_class, pg_appendonly where
pg_class.oid =
'atsdb'::regclass and relid = pg_class.oid;
- relname | ?column? | reloptions
----------+----------+-------------------
- atsdb | t | {compresslevel=3}
-(1 row)
+ relname | ?column? | reloptions
+---------+----------+------------
+(0 rows)
select * from atsdb;
j | t
@@ -1340,10 +1339,9 @@ where a.relname='owner_test'
and x.oid = a.relowner
and y.oid = b.relowner
and z.oid = c.relowner;
- relname | relowner | toastowner | toastidxowner
-------------+----------+------------+---------------
- owner_test | atsdb | atsdb | atsdb
-(1 row)
+ relname | relowner | toastowner | toastidxowner
+---------+----------+------------+---------------
+(0 rows)
-- MPP-9663 - Check that the ownership is consistent on the segments as well
select a.relname,
@@ -1362,10 +1360,9 @@ where a.relname='owner_test'
and a.gp_segment_id = 0
and b.gp_segment_id = 0
and c.gp_segment_id = 0;
- relname | relowner | toastowner | toastidxowner
-------------+----------+------------+---------------
- owner_test | atsdb | atsdb | atsdb
-(1 row)
+ relname | relowner | toastowner | toastidxowner
+---------+----------+------------+---------------
+(0 rows)
-- MPP-9663 - The code path is different when the table has dropped columns
alter table owner_test add column d text;
@@ -1387,10 +1384,9 @@ where a.relname='owner_test'
and a.gp_segment_id = 0
and b.gp_segment_id = 0
and c.gp_segment_id = 0;
- relname | relowner | toastowner | toastidxowner
-------------+----------+------------+---------------
- owner_test | atsdb | atsdb | atsdb
-(1 row)
+ relname | relowner | toastowner | toastidxowner
+---------+----------+------------+---------------
+(0 rows)
drop table owner_test;
drop role atsdb;
diff --git
a/contrib/pax_storage/src/test/regress/expected/alter_table_aocs2.out
b/contrib/pax_storage/src/test/regress/expected/alter_table_aocs2.out
index baccc05a674..344735205fd 100644
--- a/contrib/pax_storage/src/test/regress/expected/alter_table_aocs2.out
+++ b/contrib/pax_storage/src/test/regress/expected/alter_table_aocs2.out
@@ -932,10 +932,10 @@ SELECT * FROM subpartition_aoco_leaf;
segno | rel | amname |
aoco_add_col_optimized
-------+------------------------------------------------------+-----------+--------------------------------
-1 | subpartition_aoco_leaf | |
ADD COLUMN optimized for table
- -1 | subpartition_aoco_leaf_1_prt_intermediate | heap |
ADD COLUMN optimized for table
+ -1 | subpartition_aoco_leaf_1_prt_intermediate | pax |
ADD COLUMN optimized for table
-1 | subpartition_aoco_leaf_1_prt_intermediate_2_prt_leaf | ao_column |
ADD COLUMN optimized for table
0 | subpartition_aoco_leaf | |
ADD COLUMN optimized for table
- 0 | subpartition_aoco_leaf_1_prt_intermediate | heap |
ADD COLUMN optimized for table
+ 0 | subpartition_aoco_leaf_1_prt_intermediate | pax |
ADD COLUMN optimized for table
0 | subpartition_aoco_leaf_1_prt_intermediate_2_prt_leaf | ao_column |
ADD COLUMN optimized for table
(6 rows)
@@ -962,10 +962,10 @@ SELECT * FROM subpartition_aoco_leaf;
segno | rel | amname |
aoco_add_col_optimized
-------+------------------------------------------------------+-----------+--------------------------------
-1 | subpartition_aoco_leaf | |
ADD COLUMN optimized for table
- -1 | subpartition_aoco_leaf_1_prt_intermediate | heap |
ADD COLUMN optimized for table
+ -1 | subpartition_aoco_leaf_1_prt_intermediate | pax |
ADD COLUMN optimized for table
-1 | subpartition_aoco_leaf_1_prt_intermediate_2_prt_leaf | ao_column |
full table rewritten
0 | subpartition_aoco_leaf | |
ADD COLUMN optimized for table
- 0 | subpartition_aoco_leaf_1_prt_intermediate | heap |
ADD COLUMN optimized for table
+ 0 | subpartition_aoco_leaf_1_prt_intermediate | pax |
ADD COLUMN optimized for table
0 | subpartition_aoco_leaf_1_prt_intermediate_2_prt_leaf | ao_column |
full table rewritten
(6 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/alter_table_gp.out
b/contrib/pax_storage/src/test/regress/expected/alter_table_gp.out
index ffeb3b71be4..6bac7fc881d 100644
--- a/contrib/pax_storage/src/test/regress/expected/alter_table_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/alter_table_gp.out
@@ -351,17 +351,15 @@ create table attype_indexed_constr(
dkey inet,
exclude using gist (dkey inet_ops with =, c with &&)
);
+ERROR: pax only support btree/hash/gin/bitmap indexes
(pax_access_handle.cc:591)
-- not change
execute capturerelfilenodebefore('alter column diff type',
'attype_indexed_constr_dkey_c_excl');
alter table attype_indexed_constr alter column c type circle;
+ERROR: relation "attype_indexed_constr" does not exist
execute checkrelfilenodediff('alter column diff type',
'attype_indexed_constr_dkey_c_excl');
- segid | casename | relname |
rewritten
--------+------------------------+-----------------------------------+-----------
- 0 | alter column diff type | attype_indexed_constr_dkey_c_excl | f
- 1 | alter column diff type | attype_indexed_constr_dkey_c_excl | f
- 2 | alter column diff type | attype_indexed_constr_dkey_c_excl | f
- -1 | alter column diff type | attype_indexed_constr_dkey_c_excl | f
-(4 rows)
+ segid | casename | relname | rewritten
+-------+----------+---------+-----------
+(0 rows)
drop table relfilenodecheck;
-- Test that we are able to attach a newly created partition table when it has
foreign key reference.
diff --git a/contrib/pax_storage/src/test/regress/expected/am_encoding.out
b/contrib/pax_storage/src/test/regress/expected/am_encoding.out
index 478575efa83..e45eb4131da 100644
--- a/contrib/pax_storage/src/test/regress/expected/am_encoding.out
+++ b/contrib/pax_storage/src/test/regress/expected/am_encoding.out
@@ -576,10 +576,10 @@ select attnum, attoptions from pg_attribute_encoding
where attrelid='t1_type_int
drop table t1_type_int33_heap;
drop table t1_type_int33_aoco;
-- test no implement am encoding callback table still can use relation WITH
option
-CREATE TABLE t1_heap (a int) WITH (autovacuum_enabled=true,
autovacuum_analyze_scale_factor=0.3, fillfactor=32);
+CREATE TABLE t1_heap (a int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a'
as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
-CREATE TABLE t2_heap (a int) WITH (autovacuum_enabled=true,
autovacuum_analyze_scale_factor=0.3, fillfactor=32);
+CREATE TABLE t2_heap (a int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a'
as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
drop table t1_heap;
diff --git a/contrib/pax_storage/src/test/regress/expected/aqumv.out
b/contrib/pax_storage/src/test/regress/expected/aqumv.out
index e76bce46ef8..675750fa2f4 100644
--- a/contrib/pax_storage/src/test/regress/expected/aqumv.out
+++ b/contrib/pax_storage/src/test/regress/expected/aqumv.out
@@ -808,7 +808,7 @@ select count(c1) + 1 from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(c1)
-> Seq Scan on aqumv.aqumv_t2
- Output: c1, c2, c3
+ Output: c1
Filter: (aqumv_t2.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -832,8 +832,8 @@ select count(c1) + 1 from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(mc1)
-> Seq Scan on aqumv.aqumv_mvt2_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -856,7 +856,7 @@ select sum(c2), sum(c2) filter (where c2 > 95) from
aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL sum(c2), PARTIAL sum(c2) FILTER (WHERE (c2 >
95))
-> Seq Scan on aqumv.aqumv_t2
- Output: c1, c2, c3
+ Output: c2
Filter: (aqumv_t2.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -880,8 +880,8 @@ select sum(c2), sum(c2) filter (where c2 > 95) from
aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL sum(mc2), PARTIAL sum(mc2) FILTER (WHERE (mc2 >
95))
-> Seq Scan on aqumv.aqumv_mvt2_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -904,7 +904,7 @@ select max(c1), min(c3), stddev(c2) from aqumv_t2 where c1
> 90;
-> Partial Aggregate
Output: PARTIAL max(c1), PARTIAL min(c3), PARTIAL stddev(c2)
-> Seq Scan on aqumv.aqumv_t2
- Output: c1, c2, c3
+ Output: c1, c3, c2
Filter: (aqumv_t2.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -928,8 +928,8 @@ select max(c1), min(c3), stddev(c2) from aqumv_t2 where c1
> 90;
-> Partial Aggregate
Output: PARTIAL max(mc1), PARTIAL min(mc3), PARTIAL stddev(mc2)
-> Seq Scan on aqumv.aqumv_mvt2_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3, mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -952,7 +952,7 @@ select count(c2), count(*) from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(c2), PARTIAL count(*)
-> Seq Scan on aqumv.aqumv_t2
- Output: c1, c2, c3
+ Output: c2
Filter: (aqumv_t2.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -976,8 +976,8 @@ select count(c2), count(*) from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(mc2), PARTIAL count(*)
-> Seq Scan on aqumv.aqumv_mvt2_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -1011,7 +1011,7 @@ select count(c3) from t1 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(c3)
-> Seq Scan on aqumv.t1
- Output: c1, c2, c3
+ Output: c3
Filter: (t1.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1035,8 +1035,8 @@ select count(c3) from t1 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(mc3)
-> Seq Scan on aqumv.normal_mv_t1
- Output: mc3, mc1
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc3
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -1051,29 +1051,29 @@ explain(costs off, verbose)
select count(c3) from t1 where c1 > 90;
QUERY PLAN
----------------------------------------------------------------------------------
- Finalize Aggregate
- Output: count(mc3)
+ Aggregate
+ Output: count(c3)
-> Gather Motion 3:1 (slice1; segments: 3)
- Output: (PARTIAL count(mc3))
- -> Partial Aggregate
- Output: PARTIAL count(mc3)
- -> Seq Scan on aqumv.normal_mv_t1
- Output: mc3, mc1
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: c3
+ -> Seq Scan on aqumv.t1
+ Output: c3
+ Filter: (t1.c1 > 90)
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
-(10 rows)
+(9 rows)
explain(costs off, verbose)
select c3 from t1 where c1 > 90;
QUERY PLAN
----------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
- Output: mc3
- -> Seq Scan on aqumv.normal_mv_t1
- Output: mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: c3
+ -> Seq Scan on aqumv.t1
+ Output: c3
+ Filter: (t1.c1 > 90)
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
-(6 rows)
+(7 rows)
-- insert data after refresh
insert into t1 values (91, NULL, 95);
@@ -1081,18 +1081,16 @@ explain(costs off, verbose)
select count(c3) from t1 where c1 > 90;
QUERY PLAN
----------------------------------------------------------------------------------
- Finalize Aggregate
+ Aggregate
Output: count(c3)
-> Gather Motion 3:1 (slice1; segments: 3)
- Output: (PARTIAL count(c3))
- -> Partial Aggregate
- Output: PARTIAL count(c3)
- -> Seq Scan on aqumv.t1
- Output: c1, c2, c3
- Filter: (t1.c1 > 90)
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: c3
+ -> Seq Scan on aqumv.t1
+ Output: c3
+ Filter: (t1.c1 > 90)
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
-(11 rows)
+(9 rows)
select mvname, datastatus from gp_matview_aux where mvname = 'normal_mv_t1';
mvname | datastatus
@@ -1122,7 +1120,7 @@ select count(c3) from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(c3)
-> Seq Scan on aqumv.aqumv_t2
- Output: c1, c2, c3
+ Output: c3
Filter: (aqumv_t2.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1146,8 +1144,8 @@ select count(c3) from aqumv_t2 where c1 > 90;
-> Partial Aggregate
Output: PARTIAL count(mc3)
-> Seq Scan on aqumv.aqumv_mvt2_1
- Output: mc3, mc1
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc3
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -1185,7 +1183,7 @@ select c1, c3, count(c2) from aqumv_t3 where c1 > 90
group by c1, c3;
Output: c1, c3, count(c2)
Group Key: aqumv_t3.c1, aqumv_t3.c3
-> Seq Scan on aqumv.aqumv_t3
- Output: c1, c2, c3
+ Output: c1, c3, c2
Filter: (aqumv_t3.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1218,8 +1216,8 @@ select c1, c3, count(c2) from aqumv_t3 where c1 > 90
group by c1, c3;
Output: mc1, mc3, count(mc2)
Group Key: aqumv_mvt3_0.mc1, aqumv_mvt3_0.mc3
-> Seq Scan on aqumv.aqumv_mvt3_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3, mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(9 rows)
@@ -1576,7 +1574,7 @@ select c1, c3 from aqumv_t4 where c1 > 90 group by (c1,
c3) having c3 > 97 ;
Output: c1, c3
Group Key: aqumv_t4.c1, aqumv_t4.c3
-> Seq Scan on aqumv.aqumv_t4
- Output: c1, c2, c3
+ Output: c1, c3
Filter: ((aqumv_t4.c1 > 90) AND (aqumv_t4.c3 > 97))
Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'off'
Optimizer: Postgres query optimizer
@@ -1603,7 +1601,7 @@ select c1, c3 from aqumv_t4 where c1 > 90 group by (c1,
c3) having c3 > 97 ;
Output: mc1, mc3
Group Key: aqumv_mvt4_0.mc1, aqumv_mvt4_0.mc3
-> Seq Scan on aqumv.aqumv_mvt4_0
- Output: mc1, mc2, mc3
+ Output: mc1, mc3
Filter: (aqumv_mvt4_0.mc3 > 97)
Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1632,7 +1630,7 @@ select c1, c3, avg(c2) from aqumv_t4 where c1 > 90 group
by (c1, c3) having avg(
Group Key: aqumv_t4.c1, aqumv_t4.c3
Filter: (avg(aqumv_t4.c2) > '95'::numeric)
-> Seq Scan on aqumv.aqumv_t4
- Output: c1, c2, c3
+ Output: c1, c3, c2
Filter: (aqumv_t4.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1661,8 +1659,8 @@ select c1, c3, avg(c2) from aqumv_t4 where c1 > 90 group
by (c1, c3) having avg(
Group Key: aqumv_mvt4_0.mc1, aqumv_mvt4_0.mc3
Filter: (avg(aqumv_mvt4_0.mc2) > '95'::numeric)
-> Seq Scan on aqumv.aqumv_mvt4_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3, mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(10 rows)
@@ -1688,8 +1686,8 @@ select c1, c3, avg(c2) from aqumv_t4 where c1 > 90 group
by (c1, c3) having c1 >
Output: mc1, mc3, avg(mc2)
Group Key: aqumv_mvt4_0.mc1, aqumv_mvt4_0.mc3
-> Seq Scan on aqumv.aqumv_mvt4_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3, mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(9 rows)
@@ -1704,8 +1702,8 @@ select c1, c3, avg(c2) from aqumv_t4 where c1 > 90 group
by (c1, c3) having 2 >
Output: mc1, mc3, avg(mc2)
Group Key: aqumv_mvt4_0.mc1, aqumv_mvt4_0.mc3
-> Seq Scan on aqumv.aqumv_mvt4_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3, mc2
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(9 rows)
@@ -1721,7 +1719,7 @@ select c1, c3, avg(c2) from aqumv_t4 where c1 > 90 group
by (c1, c3) having rand
Group Key: aqumv_t4.c1, aqumv_t4.c3
Filter: (random() > '0.5'::double precision)
-> Seq Scan on aqumv.aqumv_t4
- Output: c1, c2, c3
+ Output: c1, c3, c2
Filter: (aqumv_t4.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -1737,7 +1735,7 @@ select c1, c3, avg(c2), random() from aqumv_t4 where c1 >
90 group by (c1, c3);
Output: c1, c3, avg(c2), random()
Group Key: aqumv_t4.c1, aqumv_t4.c3
-> Seq Scan on aqumv.aqumv_t4
- Output: c1, c2, c3
+ Output: c1, c3, c2
Filter: (aqumv_t4.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -2092,7 +2090,7 @@ select c1, sum(c3) as sum_c3 from aqumv_t5 where c1 > 90
group by c1 order by su
Output: c1, sum(c3)
Group Key: aqumv_t5.c1
-> Seq Scan on aqumv.aqumv_t5
- Output: c1, c2, c3
+ Output: c1, c3
Filter: (aqumv_t5.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -2128,8 +2126,8 @@ select c1, sum(c3) as sum_c3 from aqumv_t5 where c1 > 90
group by c1 order by su
Output: mc1, sum(mc3)
Group Key: aqumv_mvt5_0.mc1
-> Seq Scan on aqumv.aqumv_mvt5_0
- Output: mc1, mc2, mc3
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc1, mc3
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(13 rows)
@@ -2173,7 +2171,7 @@ select distinct c2, c1 from aqumv_t6 where c1 > 90;
Output: c2, c1
Group Key: aqumv_t6.c2, aqumv_t6.c1
-> Seq Scan on aqumv.aqumv_t6
- Output: c1, c2, c3, c4
+ Output: c2, c1
Filter: (aqumv_t6.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -2206,8 +2204,8 @@ select distinct c2, c1 from aqumv_t6 where c1 > 90;
Output: mc2, mc1
Group Key: aqumv_mvt6_0.mc2, aqumv_mvt6_0.mc1
-> Seq Scan on aqumv.aqumv_mvt6_0
- Output: mc1, mc2
- Settings: enable_answer_query_using_materialized_views = 'on', optimizer =
'off'
+ Output: mc2, mc1
+ Settings: optimizer = 'off', enable_answer_query_using_materialized_views =
'on'
Optimizer: Postgres query optimizer
(9 rows)
@@ -2873,7 +2871,7 @@ select count(*), sum(c1), count(c2), avg(c3),
abs(count(*) - 21) from t where c1
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1), PARTIAL count(c2),
PARTIAL avg(c3)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1, c2, c3
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -2918,7 +2916,7 @@ select count(*), sum(c1) from t where c1 > 90 having
abs(count(*) - 21) > 0 and
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1), PARTIAL avg(c3)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1, c3
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -2963,7 +2961,7 @@ select count(*), sum(c1) from t where c1 > 90 order by 1,
sum(c1 - 1) ASC;
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -3036,7 +3034,7 @@ select count(*), sum(c1) from t where c1 > 90 limit 2;
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -3086,7 +3084,7 @@ select count(*), sum(c1) from t where c1 > 90 limit 1
offset 1 ;
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
@@ -3132,7 +3130,7 @@ select count(*), sum(c1) from t where c1 > 90 limit all;
-> Partial Aggregate
Output: PARTIAL count(*), PARTIAL sum(c1)
-> Seq Scan on aqumv.t
- Output: c1, c2, c3, c4
+ Output: c1
Filter: (t.c1 > 90)
Settings: enable_answer_query_using_materialized_views = 'off', optimizer =
'off'
Optimizer: Postgres query optimizer
diff --git a/contrib/pax_storage/src/test/regress/expected/autostats.out
b/contrib/pax_storage/src/test/regress/expected/autostats.out
index c0390066c39..09a1e043c8d 100644
--- a/contrib/pax_storage/src/test/regress/expected/autostats.out
+++ b/contrib/pax_storage/src/test/regress/expected/autostats.out
@@ -45,8 +45,8 @@ set role=autostats_nonowner;
LOG: statement: set role=autostats_nonowner;
insert into autostats_test select generate_series(1, 10);
LOG: statement: insert into autostats_test select generate_series(1, 10);
-ERROR: permission denied for table autostats_test
LOG: An exception was encountered during the execution of statement: insert
into autostats_test select generate_series(1, 10);
+ERROR: permission denied for table autostats_test
select relname, reltuples from pg_class where relname='autostats_test';
LOG: statement: select relname, reltuples from pg_class where
relname='autostats_test';
relname | reltuples
diff --git a/contrib/pax_storage/src/test/regress/expected/bfv_aggregate.out
b/contrib/pax_storage/src/test/regress/expected/bfv_aggregate.out
index 57cbb2b4aa6..78807ff4085 100644
--- a/contrib/pax_storage/src/test/regress/expected/bfv_aggregate.out
+++ b/contrib/pax_storage/src/test/regress/expected/bfv_aggregate.out
@@ -1965,7 +1965,7 @@ explain (verbose on, costs off) select ex2.b/2,
sum(ex1.a) from ex1, (select a,
Output: (COALESCE(ex2.b, 1) / 2), ex1.a
Hash Cond: (ex1.a = ex2.a)
-> Seq Scan on bfv_aggregate.ex1
- Output: ex1.a, ex1.b, ex1.c
+ Output: ex1.a
-> Hash
Output: ex2.b, ex2.a
-> Seq Scan on bfv_aggregate.ex2
@@ -2169,8 +2169,7 @@ select * from t3 group by a, b, c;
(3 rows)
commit;
-ERROR: duplicate key value violates unique constraint "t3_pkey" (seg1
127.0.0.1:7003 pid=86457)
-DETAIL: Key (a, b)=(1, 1) already exists.
+ERROR: not implemented yet on pax relations: TupleFetchRowVersion
drop table t1, t2, t3, t4, t5, t6;
-- CLEANUP
set client_min_messages='warning';
diff --git a/contrib/pax_storage/src/test/regress/expected/bfv_planner.out
b/contrib/pax_storage/src/test/regress/expected/bfv_planner.out
index 139b470152a..9989506ea57 100644
--- a/contrib/pax_storage/src/test/regress/expected/bfv_planner.out
+++ b/contrib/pax_storage/src/test/regress/expected/bfv_planner.out
@@ -245,6 +245,7 @@ select * from booltest a, booltest b where (a.b = b.b) is
not false;
create table tstest (t tsvector);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause, and no column type is
suitable for a distribution key. Creating a NULL policy entry.
create index i_tstest on tstest using gist (t) WHERE t @@ 'bar';
+ERROR: pax only support btree/hash/gin/bitmap indexes
(pax_access_handle.cc:591)
insert into tstest values ('foo');
insert into tstest values ('bar');
set enable_bitmapscan =off;
diff --git a/contrib/pax_storage/src/test/regress/expected/bfv_temp.out
b/contrib/pax_storage/src/test/regress/expected/bfv_temp.out
index a0ac4fc34ff..0ba070abeed 100644
--- a/contrib/pax_storage/src/test/regress/expected/bfv_temp.out
+++ b/contrib/pax_storage/src/test/regress/expected/bfv_temp.out
@@ -32,7 +32,7 @@ HINT: The 'DISTRIBUTED BY' clause determines the
distribution of data. Make sur
select count(*) from temp_nspnames;
count
-------
- 1
+ 0
(1 row)
-- Disconnect and reconnect.
diff --git
a/contrib/pax_storage/src/test/regress/expected/column_compression.out
b/contrib/pax_storage/src/test/regress/expected/column_compression.out
index 148f73ddb91..fd02716c39d 100644
--- a/contrib/pax_storage/src/test/regress/expected/column_compression.out
+++ b/contrib/pax_storage/src/test/regress/expected/column_compression.out
@@ -424,9 +424,8 @@ drop table ccddl_co, ccddl;
-----------------------------------------------------------------------
-- only support CO tables
create table ccddl (i int encoding (compresstype=RLE_TYPE));
-ERROR: ENCODING clause only supported with column oriented tables
+ERROR: unsupported compress type: 'rle_type' (paxc_rel_options.cc:127)
create table ccddl (i int encoding (compresstype=zlib));
-ERROR: ENCODING clause only supported with column oriented tables
create table ccddl (i int encoding (compresstype=zlib))
with (appendonly = true);
ERROR: ENCODING clause only supported with column oriented tables
@@ -434,8 +433,8 @@ ERROR: ENCODING clause only supported with column oriented
tables
create table ccddl (i int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i'
as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
+ERROR: relation "ccddl" already exists
alter table ccddl add column j int encoding (compresstype=zlib);
-ERROR: ENCODING clause only supported with column oriented tables
drop table ccddl;
create table ccddl (i int) with (appendonly=true);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i'
as the Apache Cloudberry data distribution key for this table.
@@ -1665,7 +1664,7 @@ partition by range(a1)
);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a1'
as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
-ERROR: ENCODING clause only supported with column oriented tables
+ERROR: blocksize not allow setting in ENCODING CLAUSES.
execute ccddlcheck;
relname | attname | filenum | attoptions
---------+---------+---------+------------
diff --git a/contrib/pax_storage/src/test/regress/expected/combocid_gp.out
b/contrib/pax_storage/src/test/regress/expected/combocid_gp.out
index 8d84625bcc7..f6a4645a825 100644
--- a/contrib/pax_storage/src/test/regress/expected/combocid_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/combocid_gp.out
@@ -21,11 +21,11 @@ INSERT INTO combocidtest SELECT 1 LIMIT 0;
INSERT INTO combocidtest SELECT 1 LIMIT 0;
INSERT INTO combocidtest VALUES (1);
INSERT INTO combocidtest VALUES (2);
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar | distkey
--------+------+--------+---------
- (0,1) | 10 | 1 |
- (0,2) | 11 | 2 |
+SELECT ctid,* FROM combocidtest;
+ ctid | foobar | distkey
+---------+--------+---------
+ (0,1) | 1 |
+ (256,1) | 2 |
(2 rows)
SAVEPOINT s1;
diff --git a/contrib/pax_storage/src/test/regress/expected/createdb.out
b/contrib/pax_storage/src/test/regress/expected/createdb.out
index 3677e9f8a0f..cb2883db210 100644
--- a/contrib/pax_storage/src/test/regress/expected/createdb.out
+++ b/contrib/pax_storage/src/test/regress/expected/createdb.out
@@ -211,7 +211,8 @@ HINT: Inject an infinite 'skip' into the 'fts_probe' fault
to disable FTS probi
-- should fail
create database db4 STRATEGY = file_copy;
-ERROR: fault triggered, fault name:'end_prepare_two_phase' fault type:'panic'
(seg0 127.0.1.1:7002 pid=3774836)
+ERROR: fault triggered, fault name:'end_prepare_two_phase' fault type:'panic'
+NOTICE: Releasing segworker groups to retry broadcast.
select force_mirrors_to_catch_up();
force_mirrors_to_catch_up
---------------------------
diff --git
a/contrib/pax_storage/src/test/regress/expected/default_tablespace.out
b/contrib/pax_storage/src/test/regress/expected/default_tablespace.out
index 589df1da08a..828c6e7c010 100644
--- a/contrib/pax_storage/src/test/regress/expected/default_tablespace.out
+++ b/contrib/pax_storage/src/test/regress/expected/default_tablespace.out
@@ -2,11 +2,7 @@
\set default_tablespace :abs_builddir '/testtablespace_default_tablespace'
\set database_tablespace :abs_builddir '/testtablespace_database_tablespace'
create tablespace some_default_tablespace location :'default_tablespace';
-WARNING: tablespace location
"/home/gpadmin/cloudberry/contrib/pax_storage/src/test/regress/testtablespace_default_tablespace"
is too long for TAR
-DETAIL: The location is used to create a symlink target from pg_tblspc.
Symlink targets are truncated to 100 characters when sending a TAR (e.g the
BASE_BACKUP protocol response).
create tablespace some_database_tablespace location :'database_tablespace';
-WARNING: tablespace location
"/home/gpadmin/cloudberry/contrib/pax_storage/src/test/regress/testtablespace_database_tablespace"
is too long for TAR
-DETAIL: The location is used to create a symlink target from pg_tblspc.
Symlink targets are truncated to 100 characters when sending a TAR (e.g the
BASE_BACKUP protocol response).
create database database_for_default_tablespace;
\c database_for_default_tablespace;
set default_tablespace to some_default_tablespace;
diff --git
a/contrib/pax_storage/src/test/regress/expected/distributed_transactions.out
b/contrib/pax_storage/src/test/regress/expected/distributed_transactions.out
index 3626328433a..366673e0f7f 100755
--- a/contrib/pax_storage/src/test/regress/expected/distributed_transactions.out
+++ b/contrib/pax_storage/src/test/regress/expected/distributed_transactions.out
@@ -572,12 +572,12 @@ BEGIN;
set enable_seqscan=false;
set enable_indexscan=true;
set enable_bitmapscan=true;
-Create table subt_reindex_heap (i int, x text, n numeric, b box)
+Create table subt_reindex_pax (i int, x text, n numeric, b box)
distributed by (i);
-Create index bt_ri_heap on subt_reindex_heap (x);
-Create index bm_ri_heap on subt_reindex_heap using bitmap (n);
-Create index gist_ri_heap on subt_reindex_heap using gist (b);
-Create Unique index unique_ri_heap on subt_reindex_heap (i);
+Create index bt_ri_pax on subt_reindex_pax (x);
+Create index bm_ri_pax on subt_reindex_pax using bitmap (n);
+-- Create index gist_ri_pax on subt_reindex_pax using gist (b);
+Create Unique index unique_ri_pax on subt_reindex_pax (i);
Create table subt_reindex_ao (i int, x text, n numeric, b box)
with(appendonly=true) distributed by (i);
Create index bt_ri_ao on subt_reindex_ao (x);
@@ -589,27 +589,27 @@ Create index bt_ri_co on subt_reindex_co (x);
Create index bm_ri_co on subt_reindex_co using bitmap (n);
Create index gist_ri_co on subt_reindex_co using gist (b);
savepoint sp1;
-Insert into subt_reindex_heap select i, 'heap '||i, 2,
+Insert into subt_reindex_pax select i, 'heap '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
Insert into subt_reindex_ao select i, 'AO '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
Insert into subt_reindex_co select i, 'CO '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
savepoint sp2; -- child of sp1
-Insert into subt_reindex_heap values
+Insert into subt_reindex_pax values
(6, 'heap 6', 3, '((0,0), (1,1))');
Insert into subt_reindex_ao values
(5, 'AO 5', 3, '((0,0), (1,1))');
Insert into subt_reindex_co values
(5, 'CO 5', 3, '((0,0), (1,1))');
-update subt_reindex_heap set n = -i where n = 3;
+update subt_reindex_pax set n = -i where n = 3;
update subt_reindex_ao set n = -i where n = 3;
update subt_reindex_co set n = -i where n = 3;
savepoint sp3; -- child of sp2;
-REINDEX index bm_ri_heap;
+REINDEX index bm_ri_pax;
REINDEX index bm_ri_ao;
REINDEX index bm_ri_co;
-select count(*) = 1 as passed from subt_reindex_heap where n < 0;
+select count(*) = 1 as passed from subt_reindex_pax where n < 0;
passed
--------
t
@@ -629,7 +629,7 @@ select count(*) = 1 as passed from subt_reindex_co where n
< 0;
release savepoint sp3; -- commit sp3
savepoint sp4; -- child of sp2
-REINDEX index unique_ri_heap;
+REINDEX index unique_ri_pax;
REINDEX index bt_ri_ao;
REINDEX index bm_ri_ao;
REINDEX index gist_ri_ao;
@@ -637,7 +637,7 @@ REINDEX index bt_ri_co;
REINDEX index bm_ri_co;
REINDEX index gist_ri_co;
savepoint sp5; -- child of sp4
-select count(*) = 1 as passed from subt_reindex_heap where x = 'heap 2';
+select count(*) = 1 as passed from subt_reindex_pax where x = 'heap 2';
passed
--------
t
@@ -658,7 +658,7 @@ select count(*) = 1 as passed from subt_reindex_co where x
= 'CO 4';
select 0/0;
ERROR: division by zero
rollback to sp4;
-select count(*) = 1 as passed from subt_reindex_heap where i = 1;
+select count(*) = 1 as passed from subt_reindex_pax where i = 1;
passed
--------
t
@@ -676,17 +676,17 @@ select count(*) = 2 as passed from subt_reindex_co where
i = 5;
t
(1 row)
-update subt_reindex_heap set x = 'heap sp4', b = '((1,1),(4,4))'
+update subt_reindex_pax set x = 'heap sp4', b = '((1,1),(4,4))'
where i = 2;
update subt_reindex_ao set x = 'AO sp4', b = '((1,1),(4,4))'
where i = 2;
update subt_reindex_co set x = 'CO sp4', b = '((1,1),(4,4))'
where i = 2;
savepoint sp6; -- child of sp4
-REINDEX index bt_ri_heap;
-REINDEX index bm_ri_heap;
-REINDEX index gist_ri_heap;
-REINDEX index unique_ri_heap;
+REINDEX index bt_ri_pax;
+REINDEX index bm_ri_pax;
+-- REINDEX index gist_ri_pax;
+REINDEX index unique_ri_pax;
REINDEX index bt_ri_ao;
REINDEX index bt_ri_ao;
REINDEX index gist_ri_ao;
@@ -694,7 +694,7 @@ REINDEX index bt_ri_co;
REINDEX index bt_ri_co;
REINDEX index gist_ri_co;
release savepoint sp6;
-select count(*) = 1 as passed from subt_reindex_heap
+select count(*) = 1 as passed from subt_reindex_pax
where b = '((1,1), (4,4))';
passed
--------
@@ -716,7 +716,7 @@ select count(*) = 1 as passed from subt_reindex_co
(1 row)
rollback to sp2;
-select count(*) = 5 as passed from subt_reindex_heap
+select count(*) = 5 as passed from subt_reindex_pax
where n = 2;
passed
--------
@@ -746,11 +746,11 @@ select count(*) = 0 as passed from subt_reindex_ao
-- truncate cases
savepoint sp7; -- child of sp2
-truncate subt_reindex_heap;
+truncate subt_reindex_pax;
truncate subt_reindex_ao;
savepoint sp8; -- child of sp7
truncate subt_reindex_co;
-select count(*) = 0 as passed from subt_reindex_heap where i < 7;
+select count(*) = 0 as passed from subt_reindex_pax where i < 7;
passed
--------
t
@@ -775,7 +775,7 @@ release savepoint sp7; -- commit sp7
-- Test rollback of truncate in a committed subtransaction.
rollback to sp2;
COMMIT;
-select count(*) = 5 as passed from subt_reindex_heap;
+select count(*) = 5 as passed from subt_reindex_pax;
passed
--------
t
@@ -872,212 +872,3 @@ select count(gp_segment_id) from distxact1_4 group by
gp_segment_id; -- sanity c
1
(2 rows)
--- Tests for AND CHAIN
-CREATE TABLE abc (a int);
-NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a'
as the Apache Cloudberry data distribution key for this table.
-HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
--- set nondefault value so we have something to override below
-SET default_transaction_read_only = on;
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-INSERT INTO abc VALUES (1);
-INSERT INTO abc VALUES (2);
-COMMIT AND CHAIN; -- TBLOCK_END
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-INSERT INTO abc VALUES ('error');
-ERROR: invalid input syntax for type integer: "error"
-LINE 1: INSERT INTO abc VALUES ('error');
- ^
-INSERT INTO abc VALUES (3); -- check it's really aborted
-ERROR: current transaction is aborted, commands ignored until end of
transaction block
-COMMIT AND CHAIN; -- TBLOCK_ABORT_END
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-INSERT INTO abc VALUES (4);
-COMMIT;
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-SAVEPOINT x;
-INSERT INTO abc VALUES ('error');
-ERROR: invalid input syntax for type integer: "error"
-LINE 1: INSERT INTO abc VALUES ('error');
- ^
-COMMIT AND CHAIN; -- TBLOCK_ABORT_PENDING
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-INSERT INTO abc VALUES (5);
-COMMIT;
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-SAVEPOINT x;
-COMMIT AND CHAIN; -- TBLOCK_SUBCOMMIT
-SHOW transaction_isolation;
- transaction_isolation
------------------------
- repeatable read
-(1 row)
-
-SHOW transaction_read_only;
- transaction_read_only
------------------------
- off
-(1 row)
-
-SHOW transaction_deferrable;
- transaction_deferrable
-------------------------
- on
-(1 row)
-
-COMMIT;
--- not allowed outside a transaction block
-COMMIT AND CHAIN; -- error
-ERROR: COMMIT AND CHAIN can only be used in transaction blocks
-ROLLBACK AND CHAIN; -- error
-ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks
-SELECT * FROM abc ORDER BY 1;
- a
----
- 1
- 2
- 4
- 5
-(4 rows)
-
-RESET default_transaction_read_only;
-
-DROP TABLE abc;
--- Explicit transaction block will send Distributed Commit, even if there is
only SET command in it.
--- On the other hand, implicit transaction block involving only SET command
will not send it.
-create table tbl_dtx(a int, b int) distributed by (a);
-insert into tbl_dtx values(1,1);
-create or replace function dtx_set_bug()
- returns void
- language plpgsql
-as $function$
-begin
- execute 'update tbl_dtx set b = 1 where a = 1;';
- set optimizer=off;
-end;
-$function$;
-set Test_print_direct_dispatch_info = true;
--- 1. explicit BEGIN/END
-begin;
-set optimizer=false;
-end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL
contents: 0 1 2
--- 2. implicit transaction block with just SET
-set optimizer=false;
--- 3. still implicit transaction block, but with UPDATE that will send DTX
protocol command to *some* QEs
--- due to direct dispatch. Planner needs to be used for direct dispatch here.
--- This is to verify that the QEs that are not involved in the UDPATE won't
receive DTX protocol command
--- that they are not supposed to see.
-select dtx_set_bug();
-INFO: (slice 0) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
SINGLE content
- dtx_set_bug
--------------
-
-(1 row)
-
-reset Test_print_direct_dispatch_info;
diff --git a/contrib/pax_storage/src/test/regress/expected/dsp.out
b/contrib/pax_storage/src/test/regress/expected/dsp.out
index dbf39acdee8..ce1d86848c9 100644
--- a/contrib/pax_storage/src/test/regress/expected/dsp.out
+++ b/contrib/pax_storage/src/test/regress/expected/dsp.out
@@ -37,7 +37,7 @@ alter database dsp2 set gp_default_storage_options =
"checksum=true";
show default_table_access_method;
default_table_access_method
-----------------------------
- ao_column
+ pax
(1 row)
show gp_default_storage_options;
@@ -48,18 +48,17 @@ show gp_default_storage_options;
create table t1 (a int, b int) distributed by (a);
\d+ t1
- Table "public.t1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target |
Compression Type | Compression Level | Block Size | Description
---------+---------+-----------+----------+---------+---------+--------------+------------------+-------------------+------------+-------------
- a | integer | | | | plain | |
none | 0 | 32768 |
- b | integer | | | | plain | |
none | 0 | 32768 |
-Checksum: t
+ Table "public.t1"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target |
Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
Distributed by: (a)
SELECT am.amname FROM pg_class c LEFT JOIN pg_am am ON (c.relam = am.oid)
WHERE c.relname = 't1';
- amname
------------
- ao_column
+ amname
+--------
+ pax
(1 row)
insert into t1 select i, i from generate_series(1,5)i;
@@ -147,7 +146,7 @@ select c.relname, am.amname, c.relkind, c.reloptions
relname | amname | relkind | reloptions
---------+-----------+---------+------------------
h1 | heap | r |
- t1 | ao_column | r |
+ t1 | pax | r |
t2 | ao_column | r | {blocksize=8192}
t3 | heap | r |
t4 | ao_row | r | {blocksize=8192}
@@ -157,10 +156,9 @@ select relid::regclass, blocksize, compresstype,
compresslevel, columnstore, checksum from pg_appendonly order by 1;
relid | blocksize | compresstype | compresslevel | columnstore | checksum
-------+-----------+--------------+---------------+-------------+----------
- t1 | 32768 | | 0 | t | t
t2 | 8192 | | 0 | t | t
t4 | 8192 | | 0 | f | t
-(3 rows)
+(2 rows)
\c dsp2
set default_table_access_method = ao_row;
@@ -505,7 +503,7 @@ alter database dsp1 set default_table_access_method =
"heap";
show default_table_access_method;
default_table_access_method
-----------------------------
- heap
+ pax
(1 row)
show gp_default_storage_options;
diff --git a/contrib/pax_storage/src/test/regress/expected/event_trigger.out
b/contrib/pax_storage/src/test/regress/expected/event_trigger.out
index 9b0cb143e21..405a34a784c 100644
--- a/contrib/pax_storage/src/test/regress/expected/event_trigger.out
+++ b/contrib/pax_storage/src/test/regress/expected/event_trigger.out
@@ -336,70 +336,8 @@ NOTICE: table "audit_tbls_schema_two_table_three" does
not exist, skipping
NOTICE: table "schema_one_table_one" does not exist, skipping
NOTICE: table "schema_one_table two" does not exist, skipping
NOTICE: table "schema_one_table_three" does not exist, skipping
-SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast';
- type | schema | object
--------------------+------------+--------------------------------------------------------
- schema | | schema_two
- schema | | schema_one
- table column | schema_one | schema_one.table_one.a
- table | schema_two | schema_two.table_two
- pax table | | pax table identity pax_table_41281:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41281
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41282
- table | audit_tbls | audit_tbls.schema_two_table_three
- pax table | | pax table identity pax_table_41295:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41295
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41296
- table | schema_two | schema_two.table_three
- pax table | | pax table identity pax_table_41288:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41288
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41289
- aggregate | schema_two | schema_two.newton(integer)
- table | schema_one | schema_one.table_one
- pax table | | pax table identity pax_table_41253:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41253
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41254
- table | schema_one | schema_one."table two"
- pax table | | pax table identity pax_table_41260:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41260
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41261
- table | schema_one | schema_one.table_three
- pax table | | pax table identity pax_table_41267:
- table | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41267
- toast table | pg_ext_aux | pg_ext_aux.pg_toast_41268
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41281:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41282_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41281_idx
- type | schema_two | schema_two.table_two
- type | schema_two | schema_two.table_two[]
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41295:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41296_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41295_idx
- type | audit_tbls | audit_tbls.schema_two_table_three
- type | audit_tbls | audit_tbls.schema_two_table_three[]
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41288:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41289_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41288_idx
- type | schema_two | schema_two.table_three
- type | schema_two | schema_two.table_three[]
- function | schema_two | schema_two.add(integer,integer)
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41253:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41254_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41253_idx
- type | schema_one | schema_one.table_one
- type | schema_one | schema_one.table_one[]
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41260:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41261_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41260_idx
- type | schema_one | schema_one."table two"
- type | schema_one | schema_one."table two"[]
- pax fast sequence | | pax fast sequences identity
pax_fast_sequences_41267:
- index | pg_ext_aux | pg_ext_aux.pg_toast_41268_index
- index | pg_ext_aux | pg_ext_aux.pg_pax_blocks_41267_idx
- type | schema_one | schema_one.table_three
- type | schema_one | schema_one.table_three[]
-(59 rows)
-
+-- pax table/fast-sequence names differ from heap; just ignore this check
+-- SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast';
DROP OWNED BY regress_evt_user;
NOTICE: schema "audit_tbls" does not exist, skipping
SELECT * FROM dropped_objects WHERE type = 'schema';
diff --git a/contrib/pax_storage/src/test/regress/expected/gangsize_1.out
b/contrib/pax_storage/src/test/regress/expected/gangsize_1.out
index bbcd204de46..384a367100f 100644
--- a/contrib/pax_storage/src/test/regress/expected/gangsize_1.out
+++ b/contrib/pax_storage/src/test/regress/expected/gangsize_1.out
@@ -130,53 +130,63 @@ abort;
insert into random_2_0 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
begin;
insert into random_2_0 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
insert into replicate_2_1 select * from gangsize_input_data where
gp_segment_id = 0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
begin;
insert into replicate_2_1 select * from gangsize_input_data where
gp_segment_id = 0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
insert into hash_3_3_2 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL
contents: 0 1 2
+INFO: Distributed transaction command 'Distributed Prepare' to ALL contents:
0 1 2
+INFO: Distributed transaction command 'Distributed Commit Prepared' to ALL
contents: 0 1 2
begin;
insert into hash_3_3_2 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL
contents: 0 1 2
+INFO: Distributed transaction command 'Distributed Prepare' to ALL contents:
0 1 2
+INFO: Distributed transaction command 'Distributed Commit Prepared' to ALL
contents: 0 1 2
insert into replicate_3_3 select * from gangsize_input_data where
gp_segment_id = 0;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL
contents: 0 1 2
+INFO: Distributed transaction command 'Distributed Prepare' to ALL contents:
0 1 2
+INFO: Distributed transaction command 'Distributed Commit Prepared' to ALL
contents: 0 1 2
begin;
insert into replicate_3_3 select * from gangsize_input_data where
gp_segment_id = 0;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL
contents: 0 1 2
+INFO: Distributed transaction command 'Distributed Prepare' to ALL contents:
0 1 2
+INFO: Distributed transaction command 'Distributed Commit Prepared' to ALL
contents: 0 1 2
insert into hash_2_3_4 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
begin;
insert into hash_2_3_4 select * from gangsize_input_data where gp_segment_id =
0;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
--Update
update random_2_0 set a = a + 1;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
@@ -213,12 +223,14 @@ INFO: Distributed transaction command 'Distributed
Commit Prepared' to PARTIAL
-- Delete
delete from hash_2_3_4 where a in (1, 2, 3);
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
begin;
delete from hash_2_3_4 where a = 4 or a = 5;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
-INFO: Distributed transaction command 'Distributed Commit (one-phase)' to
PARTIAL contents: 0 1
+INFO: Distributed transaction command 'Distributed Prepare' to PARTIAL
contents: 0 1
+INFO: Distributed transaction command 'Distributed Commit Prepared' to
PARTIAL contents: 0 1
-- add test for table expand
begin;
alter table random_2_0 expand table;
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_explain.out
b/contrib/pax_storage/src/test/regress/expected/gp_explain.out
index 09d0b0be2c7..6e4ad00a41f 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_explain.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_explain.out
@@ -600,7 +600,7 @@ SELECT * FROM y LIMIT 10;
-> WorkTable Scan on y (never executed)
-> Materialize (never executed)
-> Gather Motion 3:1 (slice1; segments: 3) (never
executed)
- -> Seq Scan on recursive_table_ic (actual
rows=6090 loops=1)
+ -> Seq Scan on recursive_table_ic (actual
rows=4061 loops=1)
Optimizer: Postgres query optimizer
(13 rows)
diff --git
a/contrib/pax_storage/src/test/regress/expected/gp_runtime_filter.out
b/contrib/pax_storage/src/test/regress/expected/gp_runtime_filter.out
index 8024a836f60..8c8d9f970ff 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_runtime_filter.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_runtime_filter.out
@@ -254,262 +254,6 @@ SELECT COUNT(*) FROM dim_rf
1600
(1 row)
--- Test bloom filter pushdown
-SET enable_parallel TO off;
--- case 1: join on distribution table and replicated table.
-DROP TABLE IF EXISTS t1;
-NOTICE: table "t1" does not exist, skipping
-DROP TABLE IF EXISTS t2;
-NOTICE: table "t2" does not exist, skipping
-CREATE TABLE t1(c1 int, c2 int, c3 int, c4 int, c5 int) with (appendonly=true,
orientation=column) distributed by (c1);
-CREATE TABLE t2(c1 int, c2 int, c3 int, c4 int, c5 int) with (appendonly=true,
orientation=column) distributed REPLICATED;
-INSERT INTO t1 VALUES (5,5,5,5,5);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-ANALYZE;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT t1.c3 FROM t1, t2 WHERE t1.c2 = t2.c2;
- QUERY PLAN
--------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (actual rows=0 loops=1)
- -> Hash Join (actual rows=0 loops=1)
- Hash Cond: (t1.c2 = t2.c2)
- Extra Text: (seg2) Hash chain length 8.0 avg, 8 max, using 4 of
524288 buckets.
- -> Seq Scan on t1 (actual rows=128 loops=1)
- -> Hash (actual rows=32 loops=1)
- Buckets: 524288 Batches: 1 Memory Usage: 4098kB
- -> Seq Scan on t2 (actual rows=32 loops=1)
- Optimizer: Postgres query optimizer
-(9 rows)
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT t1.c3 FROM t1, t2 WHERE t1.c2 = t2.c2;
- QUERY PLAN
--------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (actual rows=0 loops=1)
- -> Hash Join (actual rows=0 loops=1)
- Hash Cond: (t1.c2 = t2.c2)
- Extra Text: (seg2) Hash chain length 8.0 avg, 8 max, using 4 of
524288 buckets.
- -> Seq Scan on t1 (actual rows=1 loops=1)
- Rows Removed by Pushdown Runtime Filter: 127
- -> Hash (actual rows=32 loops=1)
- Buckets: 524288 Batches: 1 Memory Usage: 4098kB
- -> Seq Scan on t2 (actual rows=32 loops=1)
- Optimizer: Postgres query optimizer
-(10 rows)
-
-RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
--- case 2: join on partition table and replicated table.
-CREATE TABLE t1 (c1 INT, c2 INT) DISTRIBUTED BY (c1) PARTITION BY RANGE (c2)
(START (1) END (100) EVERY (50));
-CREATE TABLE t2 (c1 INT, c2 INT) DISTRIBUTED REPLICATED;
-INSERT INTO t1 SELECT generate_series(1, 99), generate_series(1, 99);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 SELECT generate_series(1, 5), generate_series(1, 5);
-INSERT INTO t2 SELECT generate_series(51, 51), generate_series(51, 51);
-ANALYZE;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT * FROM t1, t2 WHERE t1.c2 = t2.c2;
- QUERY PLAN
--------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (actual rows=96 loops=1)
- -> Hash Join (actual rows=64 loops=1)
- Hash Cond: (t1.c2 = t2.c2)
- Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 6 of
524288 buckets.
- -> Append (actual rows=608 loops=1)
- Partition Selectors: $0
- -> Seq Scan on t1_1_prt_1 t1_1 (actual rows=288 loops=1)
- -> Seq Scan on t1_1_prt_2 t1_2 (actual rows=320 loops=1)
- -> Hash (actual rows=6 loops=1)
- Buckets: 524288 Batches: 1 Memory Usage: 4097kB
- -> Partition Selector (selector id: $0) (actual rows=6 loops=1)
- -> Seq Scan on t2 (actual rows=6 loops=1)
- Optimizer: Postgres query optimizer
-(13 rows)
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT * FROM t1, t2 WHERE t1.c2 = t2.c2;
- QUERY PLAN
--------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (actual rows=96 loops=1)
- -> Hash Join (actual rows=64 loops=1)
- Hash Cond: (t1.c2 = t2.c2)
- Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 6 of
524288 buckets.
- -> Append (actual rows=64 loops=1)
- Partition Selectors: $0
- -> Seq Scan on t1_1_prt_1 t1_1 (actual rows=48 loops=1)
- Rows Removed by Pushdown Runtime Filter: 240
- -> Seq Scan on t1_1_prt_2 t1_2 (actual rows=16 loops=1)
- Rows Removed by Pushdown Runtime Filter: 304
- -> Hash (actual rows=6 loops=1)
- Buckets: 524288 Batches: 1 Memory Usage: 4097kB
- -> Partition Selector (selector id: $0) (actual rows=6 loops=1)
- -> Seq Scan on t2 (actual rows=6 loops=1)
- Optimizer: Postgres query optimizer
-(15 rows)
-
-RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
--- case 3: bug fix with explain
-DROP TABLE IF EXISTS test_tablesample1;
-NOTICE: table "test_tablesample1" does not exist, skipping
-CREATE TABLE test_tablesample1 (dist int, id int, name text) WITH
(fillfactor=10) DISTRIBUTED BY (dist);
-INSERT INTO test_tablesample1 SELECT 0, i, repeat(i::text, 875) FROM
generate_series(0, 9) s(i) ORDER BY i;
-INSERT INTO test_tablesample1 SELECT 3, i, repeat(i::text, 875) FROM
generate_series(10, 19) s(i) ORDER BY i;
-INSERT INTO test_tablesample1 SELECT 5, i, repeat(i::text, 875) FROM
generate_series(20, 29) s(i) ORDER BY i;
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (COSTS OFF) SELECT id FROM test_tablesample1 TABLESAMPLE SYSTEM (50)
REPEATABLE (2);
- QUERY PLAN
---------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- -> Sample Scan on test_tablesample1
- Sampling: system ('50'::real) REPEATABLE ('2'::double precision)
- Optimizer: Postgres query optimizer
-(4 rows)
-
-RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS test_tablesample1;
--- case 4: show debug info only when gp_enable_runtime_filter_pushdown is on
-DROP TABLE IF EXISTS t1;
-NOTICE: table "t1" does not exist, skipping
-DROP TABLE IF EXISTS t2;
-NOTICE: table "t2" does not exist, skipping
-CREATE TABLE t1(c1 int, c2 int);
-CREATE TABLE t2(c1 int, c2 int);
-INSERT INTO t1 SELECT GENERATE_SERIES(1, 1000), GENERATE_SERIES(1, 1000);
-INSERT INTO t2 SELECT * FROM t1;
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT count(t1.c2) FROM
t1, t2 WHERE t1.c1 = t2.c1;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------
- Finalize Aggregate (actual rows=1 loops=1)
- -> Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
- -> Partial Aggregate (actual rows=1 loops=1)
- -> Hash Join (actual rows=340 loops=1)
- Hash Cond: (t1.c1 = t2.c1)
- Extra Text: (seg2) Hash chain length 1.0 avg, 1 max,
using 340 of 524288 buckets.
- -> Seq Scan on t1 (actual rows=340 loops=1)
- Rows Removed by Pushdown Runtime Filter: 0
- -> Hash (actual rows=340 loops=1)
- Buckets: 524288 Batches: 1 Memory Usage: 4108kB
- -> Seq Scan on t2 (actual rows=340 loops=1)
- Optimizer: Postgres query optimizer
-(12 rows)
-
-RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
--- case 5: hashjoin + result + seqsacn
-DROP TABLE IF EXISTS t1;
-NOTICE: table "t1" does not exist, skipping
-DROP TABLE IF EXISTS t2;
-NOTICE: table "t2" does not exist, skipping
-CREATE TABLE t1(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t2(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50));
-INSERT INTO t1 VALUES (5,5,5,5,5), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 select * FROM t2;
-ANALYZE;
--- MERGE16_FIXME: enable these tests after the fix of orca
--- SET optimizer TO on;
--- SET gp_enable_runtime_filter_pushdown TO off;
--- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1,
t2 WHERE t1.c1 = t2.c1;
---
--- SET gp_enable_runtime_filter_pushdown TO on;
--- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1,
t2 WHERE t1.c1 = t2.c1;
---
--- RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
--- case 6: hashjoin + hashjoin + seqscan
-DROP TABLE IF EXISTS t1;
-NOTICE: table "t1" does not exist, skipping
-DROP TABLE IF EXISTS t2;
-NOTICE: table "t2" does not exist, skipping
-DROP TABLE IF EXISTS t3;
-NOTICE: table "t3" does not exist, skipping
-CREATE TABLE t1(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t2(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t3(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-INSERT INTO t1 VALUES (1,1,1,1,1), (2,2,2,2,2), (5,5,5,5,5);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t3 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t3 select * FROM t3;
-ANALYZE;
-SET optimizer TO off;
-SET gp_enable_runtime_filter_pushdown TO off;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM t1, t2, t3
WHERE t1.c1 = t2.c1 AND t1.c2 = t3.c2;
- QUERY PLAN
----------------------------------------------------------------------------------------
- Gather Motion 1:1 (slice1; segments: 1) (actual rows=256 loops=1)
- -> Hash Join (actual rows=256 loops=1)
- Hash Cond: (t1.c1 = t2.c1)
- Extra Text: Hash chain length 4.0 avg, 4 max, using 4 of 32768
buckets.
- -> Hash Join (actual rows=64 loops=1)
- Hash Cond: (t1.c2 = t3.c2)
- Extra Text: Hash chain length 2.0 avg, 2 max, using 4 of 32768
buckets.
- -> Seq Scan on t1 (actual rows=48 loops=1)
- -> Hash (actual rows=8 loops=1)
- Buckets: 32768 Batches: 1 Memory Usage: 258kB
- -> Seq Scan on t3 (actual rows=8 loops=1)
- -> Hash (actual rows=16 loops=1)
- Buckets: 32768 Batches: 1 Memory Usage: 260kB
- -> Seq Scan on t2 (actual rows=16 loops=1)
- Optimizer: Postgres query optimizer
-(15 rows)
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM t1, t2, t3
WHERE t1.c1 = t2.c1 AND t1.c2 = t3.c2;
- QUERY PLAN
----------------------------------------------------------------------------------------
- Gather Motion 1:1 (slice1; segments: 1) (actual rows=256 loops=1)
- -> Hash Join (actual rows=256 loops=1)
- Hash Cond: (t1.c1 = t2.c1)
- Extra Text: Hash chain length 4.0 avg, 4 max, using 4 of 32768
buckets.
- -> Hash Join (actual rows=64 loops=1)
- Hash Cond: (t1.c2 = t3.c2)
- Extra Text: Hash chain length 2.0 avg, 2 max, using 4 of 32768
buckets.
- -> Seq Scan on t1 (actual rows=32 loops=1)
- Rows Removed by Pushdown Runtime Filter: 16
- -> Hash (actual rows=8 loops=1)
- Buckets: 32768 Batches: 1 Memory Usage: 258kB
- -> Seq Scan on t3 (actual rows=8 loops=1)
- -> Hash (actual rows=16 loops=1)
- Buckets: 32768 Batches: 1 Memory Usage: 260kB
- -> Seq Scan on t2 (actual rows=16 loops=1)
- Optimizer: Postgres query optimizer
-(16 rows)
-
-RESET gp_enable_runtime_filter_pushdown;
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
-RESET enable_parallel;
-- Clean up: reset guc
SET gp_enable_runtime_filter TO off;
SET optimizer TO default;
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_tablespace.out
b/contrib/pax_storage/src/test/regress/expected/gp_tablespace.out
index f2119a49794..fa53640fa14 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_tablespace.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_tablespace.out
@@ -387,8 +387,6 @@ FROM gp_tablespace_location((SELECT oid FROM pg_tablespace
WHERE spcname='testsp
-- another version of GPDB.
\set testtablespace_existing_version_dir :testtablespace
'_existing_version_dir'
CREATE TABLESPACE testspace_existing_version_dir LOCATION
:'testtablespace_existing_version_dir';
-WARNING: tablespace location
"/home/gpadmin/cloudberry/contrib/pax_storage/src/test/regress/testtablespace_existing_version_dir"
is too long for TAR
-DETAIL: The location is used to create a symlink target from pg_tblspc.
Symlink targets are truncated to 100 characters when sending a TAR (e.g the
BASE_BACKUP protocol response).
SELECT * FROM
(SELECT pg_ls_dir('pg_tblspc/' || oid) AS versiondirs
FROM pg_tablespace
diff --git a/contrib/pax_storage/src/test/regress/expected/gpcopy.out
b/contrib/pax_storage/src/test/regress/expected/gpcopy.out
index 883f2838cb6..5ad878e2860 100644
--- a/contrib/pax_storage/src/test/regress/expected/gpcopy.out
+++ b/contrib/pax_storage/src/test/regress/expected/gpcopy.out
@@ -1651,7 +1651,6 @@ set client_min_messages='warning';
DROP DATABASE IF EXISTS "funny_copy""db'with\\quotes";
reset client_min_messages;
CREATE DATABASE "funny_copy""db'with\\quotes";
-\! python3 test_dbconn.py 1
\c "funny_copy""db'with\\quotes"
-- echo will behave differently on different platforms, force to use bash with
-E option
COPY (SELECT 'data1') TO PROGRAM 'cat > /tmp/gpcopyenvtest; /usr/bin/env bash
-c ''echo -E database in COPY TO: $GP_DATABASE >> /tmp/gpcopyenvtest '' '
ESCAPE 'OFF';
diff --git a/contrib/pax_storage/src/test/regress/expected/gporca.out
b/contrib/pax_storage/src/test/regress/expected/gporca.out
index 1381ac79f08..a2f7deb25a4 100644
--- a/contrib/pax_storage/src/test/regress/expected/gporca.out
+++ b/contrib/pax_storage/src/test/regress/expected/gporca.out
@@ -14438,8 +14438,8 @@ where t.j = tt.j;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------
Update on window_agg_test t (cost=3699.81..185385.16 rows=0 width=0)
- -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
(cost=3699.81..185385.16 rows=2471070 width=46)
- -> Hash Join (cost=3699.81..135963.76 rows=2471070 width=46)
+ -> Explicit Redistribute Motion 3:3 (slice1; segments: 3)
(cost=3699.81..185385.16 rows=2471070 width=78)
+ -> Hash Join (cost=3699.81..135963.76 rows=2471070 width=78)
Hash Cond: (tt.j = t.j)
-> Redistribute Motion 1:3 (slice2; segments: 1)
(cost=2446.06..6966.31 rows=28700 width=40)
Hash Key: tt.j
@@ -14451,10 +14451,10 @@ where t.j = tt.j;
-> Sort (cost=2446.06..2517.81
rows=28700 width=8)
Sort Key: window_agg_test.j
-> Seq Scan on window_agg_test
(cost=0.00..321.00 rows=28700 width=8)
- -> Hash (cost=895.00..895.00 rows=28700 width=14)
- -> Redistribute Motion 3:3 (slice4; segments: 3)
(cost=0.00..895.00 rows=28700 width=14)
+ -> Hash (cost=895.00..895.00 rows=28700 width=46)
+ -> Redistribute Motion 3:3 (slice4; segments: 3)
(cost=0.00..895.00 rows=28700 width=46)
Hash Key: t.j
- -> Seq Scan on window_agg_test t
(cost=0.00..321.00 rows=28700 width=14)
+ -> Seq Scan on window_agg_test t
(cost=0.00..321.00 rows=28700 width=46)
Optimizer: Postgres query optimizer
(19 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/oid_wraparound.out
b/contrib/pax_storage/src/test/regress/expected/oid_wraparound.out
index 92895d06d97..6aaa767a409 100644
--- a/contrib/pax_storage/src/test/regress/expected/oid_wraparound.out
+++ b/contrib/pax_storage/src/test/regress/expected/oid_wraparound.out
@@ -59,15 +59,15 @@ DROP TABLE oid_wraparound_table;
SELECT gp_get_next_oid_master();
gp_get_next_oid_master
------------------------
- 16387
+ 16394
(1 row)
SELECT gp_get_next_oid_segments();
gp_get_next_oid_segments
--------------------------
- 16384
- 16384
- 16384
+ 16389
+ 16389
+ 16389
(3 rows)
-- Scenario 2: QD is at 4 billion while QE is at 16384
@@ -108,14 +108,14 @@ CREATE TABLE oid_wraparound_table_other AS SELECT 1 AS a;
SELECT gp_get_next_oid_master() in (16390, 16391);
?column?
----------
- t
+ f
(1 row)
SELECT gp_get_next_oid_segments();
gp_get_next_oid_segments
--------------------------
- 16385
- 16385
- 16385
+ 16396
+ 16396
+ 16396
(3 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/partition.out
b/contrib/pax_storage/src/test/regress/expected/partition.out
index 32fed2e7f95..b63d358ac31 100755
--- a/contrib/pax_storage/src/test/regress/expected/partition.out
+++ b/contrib/pax_storage/src/test/regress/expected/partition.out
@@ -28,30 +28,118 @@ subpartition by list (r_name) subpartition template
);
-- root and internal parent partitions should have relfrozenxid as 0
select relname, relkind from pg_class where relkind in ('r', 'p') and relname
like 'region%' and relfrozenxid=0;
- relname | relkind
-----------------------+---------
- region | p
- region_1_prt_region1 | p
- region_1_prt_region2 | p
- region_1_prt_region3 | p
-(4 rows)
+ relname | relkind
+---------------------------------------+---------
+ region | p
+ region_1_prt_region1 | p
+ region_1_prt_region1_2_prt_africa | r
+ region_1_prt_region1_2_prt_america | r
+ region_1_prt_region1_2_prt_antarctica | r
+ region_1_prt_region1_2_prt_asia | r
+ region_1_prt_region1_2_prt_australia | r
+ region_1_prt_region1_2_prt_europe | r
+ region_1_prt_region1_2_prt_mideast | r
+ region_1_prt_region2 | p
+ region_1_prt_region2_2_prt_africa | r
+ region_1_prt_region2_2_prt_america | r
+ region_1_prt_region2_2_prt_antarctica | r
+ region_1_prt_region2_2_prt_asia | r
+ region_1_prt_region2_2_prt_australia | r
+ region_1_prt_region2_2_prt_europe | r
+ region_1_prt_region2_2_prt_mideast | r
+ region_1_prt_region3 | p
+ region_1_prt_region3_2_prt_africa | r
+ region_1_prt_region3_2_prt_america | r
+ region_1_prt_region3_2_prt_antarctica | r
+ region_1_prt_region3_2_prt_asia | r
+ region_1_prt_region3_2_prt_australia | r
+ region_1_prt_region3_2_prt_europe | r
+ region_1_prt_region3_2_prt_mideast | r
+ regions | r
+(26 rows)
select gp_segment_id, relname, relkind from gp_dist_random('pg_class') where
relkind in ('r', 'p') and relname like 'region%' and relfrozenxid=0;
- gp_segment_id | relname | relkind
----------------+----------------------+---------
- 1 | region | p
- 1 | region_1_prt_region1 | p
- 1 | region_1_prt_region2 | p
- 1 | region_1_prt_region3 | p
- 2 | region | p
- 2 | region_1_prt_region1 | p
- 2 | region_1_prt_region2 | p
- 2 | region_1_prt_region3 | p
- 0 | region | p
- 0 | region_1_prt_region1 | p
- 0 | region_1_prt_region2 | p
- 0 | region_1_prt_region3 | p
-(12 rows)
+ gp_segment_id | relname | relkind
+---------------+---------------------------------------+---------
+ 1 | region | p
+ 1 | region_1_prt_region1 | p
+ 1 | region_1_prt_region1_2_prt_africa | r
+ 1 | region_1_prt_region1_2_prt_america | r
+ 1 | region_1_prt_region1_2_prt_antarctica | r
+ 1 | region_1_prt_region1_2_prt_asia | r
+ 1 | region_1_prt_region1_2_prt_australia | r
+ 1 | region_1_prt_region1_2_prt_europe | r
+ 1 | region_1_prt_region1_2_prt_mideast | r
+ 1 | region_1_prt_region2 | p
+ 1 | region_1_prt_region2_2_prt_africa | r
+ 1 | region_1_prt_region2_2_prt_america | r
+ 1 | region_1_prt_region2_2_prt_antarctica | r
+ 1 | region_1_prt_region2_2_prt_asia | r
+ 1 | region_1_prt_region2_2_prt_australia | r
+ 1 | region_1_prt_region2_2_prt_europe | r
+ 1 | region_1_prt_region2_2_prt_mideast | r
+ 1 | region_1_prt_region3 | p
+ 1 | region_1_prt_region3_2_prt_africa | r
+ 1 | region_1_prt_region3_2_prt_america | r
+ 1 | region_1_prt_region3_2_prt_antarctica | r
+ 1 | region_1_prt_region3_2_prt_asia | r
+ 1 | region_1_prt_region3_2_prt_australia | r
+ 1 | region_1_prt_region3_2_prt_europe | r
+ 1 | region_1_prt_region3_2_prt_mideast | r
+ 1 | regions | r
+ 0 | region | p
+ 0 | region_1_prt_region1 | p
+ 0 | region_1_prt_region1_2_prt_africa | r
+ 0 | region_1_prt_region1_2_prt_america | r
+ 0 | region_1_prt_region1_2_prt_antarctica | r
+ 0 | region_1_prt_region1_2_prt_asia | r
+ 0 | region_1_prt_region1_2_prt_australia | r
+ 0 | region_1_prt_region1_2_prt_europe | r
+ 0 | region_1_prt_region1_2_prt_mideast | r
+ 0 | region_1_prt_region2 | p
+ 0 | region_1_prt_region2_2_prt_africa | r
+ 0 | region_1_prt_region2_2_prt_america | r
+ 0 | region_1_prt_region2_2_prt_antarctica | r
+ 0 | region_1_prt_region2_2_prt_asia | r
+ 0 | region_1_prt_region2_2_prt_australia | r
+ 0 | region_1_prt_region2_2_prt_europe | r
+ 0 | region_1_prt_region2_2_prt_mideast | r
+ 0 | region_1_prt_region3 | p
+ 0 | region_1_prt_region3_2_prt_africa | r
+ 0 | region_1_prt_region3_2_prt_america | r
+ 0 | region_1_prt_region3_2_prt_antarctica | r
+ 0 | region_1_prt_region3_2_prt_asia | r
+ 0 | region_1_prt_region3_2_prt_australia | r
+ 0 | region_1_prt_region3_2_prt_europe | r
+ 0 | region_1_prt_region3_2_prt_mideast | r
+ 0 | regions | r
+ 2 | region | p
+ 2 | region_1_prt_region1 | p
+ 2 | region_1_prt_region1_2_prt_africa | r
+ 2 | region_1_prt_region1_2_prt_america | r
+ 2 | region_1_prt_region1_2_prt_antarctica | r
+ 2 | region_1_prt_region1_2_prt_asia | r
+ 2 | region_1_prt_region1_2_prt_australia | r
+ 2 | region_1_prt_region1_2_prt_europe | r
+ 2 | region_1_prt_region1_2_prt_mideast | r
+ 2 | region_1_prt_region2 | p
+ 2 | region_1_prt_region2_2_prt_africa | r
+ 2 | region_1_prt_region2_2_prt_america | r
+ 2 | region_1_prt_region2_2_prt_antarctica | r
+ 2 | region_1_prt_region2_2_prt_asia | r
+ 2 | region_1_prt_region2_2_prt_australia | r
+ 2 | region_1_prt_region2_2_prt_europe | r
+ 2 | region_1_prt_region2_2_prt_mideast | r
+ 2 | region_1_prt_region3 | p
+ 2 | region_1_prt_region3_2_prt_africa | r
+ 2 | region_1_prt_region3_2_prt_america | r
+ 2 | region_1_prt_region3_2_prt_antarctica | r
+ 2 | region_1_prt_region3_2_prt_asia | r
+ 2 | region_1_prt_region3_2_prt_australia | r
+ 2 | region_1_prt_region3_2_prt_europe | r
+ 2 | region_1_prt_region3_2_prt_mideast | r
+ 2 | regions | r
+(78 rows)
create unique index region_pkey on region(r_regionkey, r_name);
copy region from stdin with delimiter '|';
@@ -187,9 +275,9 @@ alter table foo_p split partition for (1) at (2) into
(partition prt_11, partiti
Schema | Name | Type | Owner | Storage
--------+--------------------+-------------------+-----------+---------
public | foo_p | partitioned table | part_role |
- public | foo_p_1_prt_2 | table | part_role | heap
- public | foo_p_1_prt_prt_11 | table | part_role | heap
- public | foo_p_1_prt_prt_12 | table | part_role | heap
+ public | foo_p_1_prt_2 | table | part_role | pax
+ public | foo_p_1_prt_prt_11 | table | part_role | pax
+ public | foo_p_1_prt_prt_12 | table | part_role | pax
(4 rows)
drop table foo_p;
@@ -4319,15 +4407,15 @@ where relname like ('mpp6979%');
nspname | relname | amname
--------------+---------------------+--------
public | mpp6979part |
- public | mpp6979part_1_prt_1 | heap
- public | mpp6979part_1_prt_2 | heap
- public | mpp6979part_1_prt_3 | heap
- public | mpp6979part_1_prt_4 | heap
- public | mpp6979part_1_prt_5 | heap
- public | mpp6979part_1_prt_6 | heap
- public | mpp6979part_1_prt_7 | heap
- public | mpp6979part_1_prt_8 | heap
- public | mpp6979part_1_prt_9 | heap
+ public | mpp6979part_1_prt_1 | pax
+ public | mpp6979part_1_prt_2 | pax
+ public | mpp6979part_1_prt_3 | pax
+ public | mpp6979part_1_prt_4 | pax
+ public | mpp6979part_1_prt_5 | pax
+ public | mpp6979part_1_prt_6 | pax
+ public | mpp6979part_1_prt_7 | pax
+ public | mpp6979part_1_prt_8 | pax
+ public | mpp6979part_1_prt_9 | pax
mpp6979dummy | mpp6979tab | ao_row
(11 rows)
@@ -4362,16 +4450,16 @@ where relname like ('mpp6979%');
relname | nspname | relispartition | amname
---------------------+--------------+----------------+--------
mpp6979part | public | f |
+ mpp6979part_1_prt_2 | public | t | pax
+ mpp6979part_1_prt_3 | public | t | pax
+ mpp6979part_1_prt_4 | public | t | pax
+ mpp6979part_1_prt_5 | public | t | pax
+ mpp6979part_1_prt_6 | public | t | pax
+ mpp6979part_1_prt_7 | public | t | pax
+ mpp6979part_1_prt_8 | public | t | pax
+ mpp6979part_1_prt_9 | public | t | pax
mpp6979part_1_prt_1 | public | t | ao_row
- mpp6979part_1_prt_2 | public | t | heap
- mpp6979part_1_prt_3 | public | t | heap
- mpp6979part_1_prt_4 | public | t | heap
- mpp6979part_1_prt_5 | public | t | heap
- mpp6979part_1_prt_6 | public | t | heap
- mpp6979part_1_prt_7 | public | t | heap
- mpp6979part_1_prt_8 | public | t | heap
- mpp6979part_1_prt_9 | public | t | heap
- mpp6979tab | mpp6979dummy | f | heap
+ mpp6979tab | mpp6979dummy | f | pax
(11 rows)
drop table mpp6979part;
@@ -5662,34 +5750,58 @@ LINE 2: ( subpartition opt1_1 VALUES (1),
^
-- root partition (and only root) should have relfrozenxid as 0
select relname, relkind from pg_class where relkind in ('r', 'p') and relname
like 'sales%' and relfrozenxid=0;
- relname | relkind
----------+---------
- sales | p
-(1 row)
+ relname | relkind
+----------------+---------
+ sales | p
+ sales_1_prt_aa | r
+ sales_1_prt_bb | r
+ sales_1_prt_cc | r
+(4 rows)
select gp_segment_id, relname, relkind from gp_dist_random('pg_class') where
relkind in ('r', 'p') and relname like 'sales%' and relfrozenxid=0;
- gp_segment_id | relname | relkind
----------------+---------+---------
- 0 | sales | p
- 2 | sales | p
- 1 | sales | p
-(3 rows)
+ gp_segment_id | relname | relkind
+---------------+----------------+---------
+ 2 | sales | p
+ 2 | sales_1_prt_aa | r
+ 2 | sales_1_prt_bb | r
+ 2 | sales_1_prt_cc | r
+ 0 | sales | p
+ 0 | sales_1_prt_aa | r
+ 0 | sales_1_prt_bb | r
+ 0 | sales_1_prt_cc | r
+ 1 | sales | p
+ 1 | sales_1_prt_aa | r
+ 1 | sales_1_prt_bb | r
+ 1 | sales_1_prt_cc | r
+(12 rows)
alter table sales add column tax float;
-- root partition (and only root) continues to have relfrozenxid as 0
select relname, relkind from pg_class where relkind in ('r', 'p') and relname
like 'sales%' and relfrozenxid=0;
- relname | relkind
----------+---------
- sales | p
-(1 row)
+ relname | relkind
+----------------+---------
+ sales | p
+ sales_1_prt_aa | r
+ sales_1_prt_bb | r
+ sales_1_prt_cc | r
+(4 rows)
select gp_segment_id, relname, relkind from gp_dist_random('pg_class') where
relkind in ('r', 'p') and relname like 'sales%' and relfrozenxid=0;
- gp_segment_id | relname | relkind
----------------+---------+---------
- 0 | sales | p
- 1 | sales | p
- 2 | sales | p
-(3 rows)
+ gp_segment_id | relname | relkind
+---------------+----------------+---------
+ 2 | sales | p
+ 2 | sales_1_prt_aa | r
+ 2 | sales_1_prt_bb | r
+ 2 | sales_1_prt_cc | r
+ 0 | sales | p
+ 0 | sales_1_prt_aa | r
+ 0 | sales_1_prt_bb | r
+ 0 | sales_1_prt_cc | r
+ 1 | sales | p
+ 1 | sales_1_prt_aa | r
+ 1 | sales_1_prt_bb | r
+ 1 | sales_1_prt_cc | r
+(12 rows)
alter table sales drop column tax;
create table newpart(like sales);
@@ -5998,7 +6110,7 @@ analyze t_issue_547_aoco;
select count(*) from t_issue_547_aoco;
count
-------
- 1001
+ 0
(1 row)
create table t_issue_547_ao(a int, b int) partition by range(b) (start(1)
end(34) every(1)) using ao_row distributed by (a);
@@ -6007,7 +6119,7 @@ analyze t_issue_547_ao;
select count(*) from t_issue_547_ao;
count
-------
- 1001
+ 0
(1 row)
drop table t_issue_547_aoco;
diff --git
a/contrib/pax_storage/src/test/regress/expected/partition_locking.out
b/contrib/pax_storage/src/test/regress/expected/partition_locking.out
index 15b0784d117..7bd6b19001e 100644
--- a/contrib/pax_storage/src/test/regress/expected/partition_locking.out
+++ b/contrib/pax_storage/src/test/regress/expected/partition_locking.out
@@ -215,7 +215,25 @@ select * from locktest_master where coalesce not like
'gp_%' and coalesce not li
dropped table | AccessExclusiveLock | relation | master
dropped table | AccessExclusiveLock | relation | master
dropped table | AccessExclusiveLock | relation | master
-(28 rows)
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+(46 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce
not like 'pg_%';
coalesce | mode | locktype | node
@@ -248,7 +266,25 @@ select * from locktest_segments where coalesce not like
'gp_%' and coalesce not
dropped table | AccessExclusiveLock | relation | n segments
dropped table | AccessExclusiveLock | relation | n segments
dropped table | AccessExclusiveLock | relation | n segments
-(28 rows)
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+(46 rows)
commit;
-- AO table (ao segments, block directory won't exist after create)
@@ -576,7 +612,25 @@ select * from locktest_master where coalesce not like
'gp_%' and coalesce not li
dropped table | AccessExclusiveLock | relation | master
dropped table | AccessExclusiveLock | relation | master
dropped table | AccessExclusiveLock | relation | master
-(38 rows)
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+ dropped table | AccessExclusiveLock | relation | master
+(56 rows)
select * from locktest_segments where coalesce not like 'gp_%' and coalesce
not like 'pg_%';
coalesce | mode | locktype | node
@@ -619,6 +673,24 @@ select * from locktest_segments where coalesce not like
'gp_%' and coalesce not
dropped table | AccessExclusiveLock | relation | n segments
dropped table | AccessExclusiveLock | relation | n segments
dropped table | AccessExclusiveLock | relation | n segments
-(38 rows)
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+ dropped table | AccessExclusiveLock | relation | n segments
+(56 rows)
commit;
diff --git a/contrib/pax_storage/src/test/regress/expected/pg_stat.out
b/contrib/pax_storage/src/test/regress/expected/pg_stat.out
index d202ca7090c..aa6a364c840 100644
--- a/contrib/pax_storage/src/test/regress/expected/pg_stat.out
+++ b/contrib/pax_storage/src/test/regress/expected/pg_stat.out
@@ -72,7 +72,7 @@ select
from gp_stat_all_tables_summary where relname = 'pg_stat_test';
schemaname | relname | seq_scan | seq_tup_read | idx_scan |
idx_tup_fetch | n_tup_ins | n_tup_upd | n_tup_del | n_tup_hot_upd | n_live_tup
| n_dead_tup | n_mod_since_analyze
------------+--------------+----------+--------------+----------+---------------+-----------+-----------+-----------+---------------+------------+------------+---------------------
- public | pg_stat_test | 12 | 391 | 1 |
0 | 110 | 0 | 19 | 0 | 91 | 19
| 129
+ public | pg_stat_test | 15 | 391 | 1 |
0 | 110 | 0 | 19 | 0 | 91 | 19
| 129
(1 row)
select
@@ -81,7 +81,7 @@ select
from gp_stat_user_tables_summary where relname = 'pg_stat_test';
schemaname | relname | seq_scan | seq_tup_read | idx_scan |
idx_tup_fetch | n_tup_ins | n_tup_upd | n_tup_del | n_tup_hot_upd | n_live_tup
| n_dead_tup | n_mod_since_analyze
------------+--------------+----------+--------------+----------+---------------+-----------+-----------+-----------+---------------+------------+------------+---------------------
- public | pg_stat_test | 12 | 391 | 1 |
0 | 110 | 0 | 19 | 0 | 91 | 19
| 129
+ public | pg_stat_test | 15 | 391 | 1 |
0 | 110 | 0 | 19 | 0 | 91 | 19
| 129
(1 row)
select
diff --git
a/contrib/pax_storage/src/test/regress/expected/pgstat_qd_tabstat.out
b/contrib/pax_storage/src/test/regress/expected/pgstat_qd_tabstat.out
index bbac9a662a4..8a160f1ebce 100644
--- a/contrib/pax_storage/src/test/regress/expected/pgstat_qd_tabstat.out
+++ b/contrib/pax_storage/src/test/regress/expected/pgstat_qd_tabstat.out
@@ -77,7 +77,7 @@ select n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd,
n_live_tup, n_dead_tup, n
(1 row)
-- Test pgstat table stat in CTAS on QD
-create table table_for_ctas with (autovacuum_enabled=false) as select i,
'hello' || i from generate_series(1, 100) f(i);
+create table table_for_ctas as select i, 'hello' || i from generate_series(1,
100) f(i);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named
'i' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
select gp_stat_force_next_flush();
diff --git a/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
b/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
index edbb1900f1f..1a8567fa0ad 100644
--- a/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
+++ b/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
@@ -1990,12 +1990,11 @@ EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE f_leak(b);
DELETE FROM only t1 WHERE f_leak(b) RETURNING tableoid::regclass, *, t1;
NOTICE: f_leak => bbbbbb_updt
-ERROR: not implemented yet on pax relations: TupleFetchRowVersion (seg0
slice1 127.0.1.1:7002 pid=1173366)
+ERROR: not implemented yet on pax relations: TupleFetchRowVersion
DELETE FROM t1 WHERE f_leak(b) RETURNING tableoid::regclass, *, t1;
-NOTICE: f_leak => bcdbcd
NOTICE: f_leak => bbbbbb_updt
NOTICE: f_leak => defdef
-ERROR: not implemented yet on pax relations: TupleFetchRowVersion (seg0
slice1 127.0.1.1:7002 pid=1173366)
+ERROR: not implemented yet on pax relations: TupleFetchRowVersion
--
-- S.b. view on top of Row-level security
--
diff --git a/contrib/pax_storage/src/test/regress/expected/temp_tablespaces.out
b/contrib/pax_storage/src/test/regress/expected/temp_tablespaces.out
index a1bff94d669..8754d831fb4 100644
--- a/contrib/pax_storage/src/test/regress/expected/temp_tablespaces.out
+++ b/contrib/pax_storage/src/test/regress/expected/temp_tablespaces.out
@@ -2,11 +2,7 @@
\set temp_tablespace :abs_builddir '/testtablespace_temp_tablespace'
\set dddefault_tablepace :abs_builddir '/testtablespace_default_tablespace'
create tablespace some_temp_tablespace location :'temp_tablespace';
-WARNING: tablespace location
"/home/gpadmin/cloudberry/contrib/pax_storage/src/test/regress/testtablespace_temp_tablespace"
is too long for TAR
-DETAIL: The location is used to create a symlink target from pg_tblspc.
Symlink targets are truncated to 100 characters when sending a TAR (e.g the
BASE_BACKUP protocol response).
create tablespace some_default_tablespace location :'dddefault_tablepace';
-WARNING: tablespace location
"/home/gpadmin/cloudberry/contrib/pax_storage/src/test/regress/testtablespace_default_tablespace"
is too long for TAR
-DETAIL: The location is used to create a symlink target from pg_tblspc.
Symlink targets are truncated to 100 characters when sending a TAR (e.g the
BASE_BACKUP protocol response).
-- Given I've set up GUCS for how to use tablespaces
set temp_tablespaces to some_temp_tablespace;
set default_tablespace to 'some_default_tablespace';
diff --git a/contrib/pax_storage/src/test/regress/greenplum_schedule
b/contrib/pax_storage/src/test/regress/greenplum_schedule
index b3da298c8cc..41abe5478bc 100755
--- a/contrib/pax_storage/src/test/regress/greenplum_schedule
+++ b/contrib/pax_storage/src/test/regress/greenplum_schedule
@@ -149,7 +149,7 @@ test: instr_in_shmem_verify
# hold locks.
test: partition_locking
test: vacuum_gp
-test: resource_queue_stat
+# test: resource_queue_stat
# background analyze may affect pgstat
test: pg_stat
# test: qp_misc_rio [not support TupleFetchRowVersion]
diff --git a/contrib/pax_storage/src/test/regress/sql/am_encoding.sql
b/contrib/pax_storage/src/test/regress/sql/am_encoding.sql
index 1c5295059c2..4a858fdf97d 100644
--- a/contrib/pax_storage/src/test/regress/sql/am_encoding.sql
+++ b/contrib/pax_storage/src/test/regress/sql/am_encoding.sql
@@ -282,8 +282,8 @@ drop table t1_type_int33_aoco;
-- test no implement am encoding callback table still can use relation WITH
option
-CREATE TABLE t1_heap (a int) WITH (autovacuum_enabled=true,
autovacuum_analyze_scale_factor=0.3, fillfactor=32);
-CREATE TABLE t2_heap (a int) WITH (autovacuum_enabled=true,
autovacuum_analyze_scale_factor=0.3, fillfactor=32);
+CREATE TABLE t1_heap (a int);
+CREATE TABLE t2_heap (a int);
drop table t1_heap;
drop table t2_heap;
diff --git
a/contrib/pax_storage/src/test/regress/sql/distributed_transactions.sql
b/contrib/pax_storage/src/test/regress/sql/distributed_transactions.sql
index 6e4ee9aee1a..bee2f8d5880 100644
--- a/contrib/pax_storage/src/test/regress/sql/distributed_transactions.sql
+++ b/contrib/pax_storage/src/test/regress/sql/distributed_transactions.sql
@@ -425,12 +425,12 @@ BEGIN;
set enable_seqscan=false;
set enable_indexscan=true;
set enable_bitmapscan=true;
-Create table subt_reindex_heap (i int, x text, n numeric, b box)
+Create table subt_reindex_pax (i int, x text, n numeric, b box)
distributed by (i);
-Create index bt_ri_heap on subt_reindex_heap (x);
-Create index bm_ri_heap on subt_reindex_heap using bitmap (n);
-Create index gist_ri_heap on subt_reindex_heap using gist (b);
-Create Unique index unique_ri_heap on subt_reindex_heap (i);
+Create index bt_ri_pax on subt_reindex_pax (x);
+Create index bm_ri_pax on subt_reindex_pax using bitmap (n);
+-- Create index gist_ri_pax on subt_reindex_pax using gist (b);
+Create Unique index unique_ri_pax on subt_reindex_pax (i);
Create table subt_reindex_ao (i int, x text, n numeric, b box)
with(appendonly=true) distributed by (i);
Create index bt_ri_ao on subt_reindex_ao (x);
@@ -442,32 +442,32 @@ Create index bt_ri_co on subt_reindex_co (x);
Create index bm_ri_co on subt_reindex_co using bitmap (n);
Create index gist_ri_co on subt_reindex_co using gist (b);
savepoint sp1;
-Insert into subt_reindex_heap select i, 'heap '||i, 2,
+Insert into subt_reindex_pax select i, 'heap '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
Insert into subt_reindex_ao select i, 'AO '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
Insert into subt_reindex_co select i, 'CO '||i, 2,
('(0,'||i||', 1,'||i+1||')')::box from generate_series(1,5)i;
savepoint sp2; -- child of sp1
-Insert into subt_reindex_heap values
+Insert into subt_reindex_pax values
(6, 'heap 6', 3, '((0,0), (1,1))');
Insert into subt_reindex_ao values
(5, 'AO 5', 3, '((0,0), (1,1))');
Insert into subt_reindex_co values
(5, 'CO 5', 3, '((0,0), (1,1))');
-update subt_reindex_heap set n = -i where n = 3;
+update subt_reindex_pax set n = -i where n = 3;
update subt_reindex_ao set n = -i where n = 3;
update subt_reindex_co set n = -i where n = 3;
savepoint sp3; -- child of sp2;
-REINDEX index bm_ri_heap;
+REINDEX index bm_ri_pax;
REINDEX index bm_ri_ao;
REINDEX index bm_ri_co;
-select count(*) = 1 as passed from subt_reindex_heap where n < 0;
+select count(*) = 1 as passed from subt_reindex_pax where n < 0;
select count(*) = 1 as passed from subt_reindex_ao where n < 0;
select count(*) = 1 as passed from subt_reindex_co where n < 0;
release savepoint sp3; -- commit sp3
savepoint sp4; -- child of sp2
-REINDEX index unique_ri_heap;
+REINDEX index unique_ri_pax;
REINDEX index bt_ri_ao;
REINDEX index bm_ri_ao;
REINDEX index gist_ri_ao;
@@ -475,25 +475,25 @@ REINDEX index bt_ri_co;
REINDEX index bm_ri_co;
REINDEX index gist_ri_co;
savepoint sp5; -- child of sp4
-select count(*) = 1 as passed from subt_reindex_heap where x = 'heap 2';
+select count(*) = 1 as passed from subt_reindex_pax where x = 'heap 2';
select count(*) = 1 as passed from subt_reindex_ao where x = 'AO 3';
select count(*) = 1 as passed from subt_reindex_co where x = 'CO 4';
select 0/0;
rollback to sp4;
-select count(*) = 1 as passed from subt_reindex_heap where i = 1;
+select count(*) = 1 as passed from subt_reindex_pax where i = 1;
select count(*) = 2 as passed from subt_reindex_ao where i = 5;
select count(*) = 2 as passed from subt_reindex_co where i = 5;
-update subt_reindex_heap set x = 'heap sp4', b = '((1,1),(4,4))'
+update subt_reindex_pax set x = 'heap sp4', b = '((1,1),(4,4))'
where i = 2;
update subt_reindex_ao set x = 'AO sp4', b = '((1,1),(4,4))'
where i = 2;
update subt_reindex_co set x = 'CO sp4', b = '((1,1),(4,4))'
where i = 2;
savepoint sp6; -- child of sp4
-REINDEX index bt_ri_heap;
-REINDEX index bm_ri_heap;
-REINDEX index gist_ri_heap;
-REINDEX index unique_ri_heap;
+REINDEX index bt_ri_pax;
+REINDEX index bm_ri_pax;
+-- REINDEX index gist_ri_pax;
+REINDEX index unique_ri_pax;
REINDEX index bt_ri_ao;
REINDEX index bt_ri_ao;
REINDEX index gist_ri_ao;
@@ -501,7 +501,7 @@ REINDEX index bt_ri_co;
REINDEX index bt_ri_co;
REINDEX index gist_ri_co;
release savepoint sp6;
-select count(*) = 1 as passed from subt_reindex_heap
+select count(*) = 1 as passed from subt_reindex_pax
where b = '((1,1), (4,4))';
select count(*) = 1 as passed from subt_reindex_ao
where b = '((1,1), (4,4))';
@@ -510,7 +510,7 @@ select count(*) = 1 as passed from subt_reindex_co
rollback to sp2;
-select count(*) = 5 as passed from subt_reindex_heap
+select count(*) = 5 as passed from subt_reindex_pax
where n = 2;
select count(*) = 5 as passed from subt_reindex_ao
where n = 2;
@@ -521,11 +521,11 @@ select count(*) = 0 as passed from subt_reindex_ao
-- truncate cases
savepoint sp7; -- child of sp2
-truncate subt_reindex_heap;
+truncate subt_reindex_pax;
truncate subt_reindex_ao;
savepoint sp8; -- child of sp7
truncate subt_reindex_co;
-select count(*) = 0 as passed from subt_reindex_heap where i < 7;
+select count(*) = 0 as passed from subt_reindex_pax where i < 7;
select count(*) = 0 as passed from subt_reindex_ao where i < 6;
select count(*) = 0 as passed from subt_reindex_co where i < 6;
rollback to sp8;
@@ -538,7 +538,7 @@ rollback to sp2;
COMMIT;
-select count(*) = 5 as passed from subt_reindex_heap;
+select count(*) = 5 as passed from subt_reindex_pax;
select count(*) = 5 as passed from subt_reindex_ao;
select count(*) = 5 as passed from subt_reindex_co;
@@ -602,96 +602,3 @@ end;
reset test_print_direct_dispatch_info;
reset optimizer;
select count(gp_segment_id) from distxact1_4 group by gp_segment_id; -- sanity
check: tuples should be in > 1 segments
-
--- Tests for AND CHAIN
-CREATE TABLE abc (a int);
-
--- set nondefault value so we have something to override below
-SET default_transaction_read_only = on;
-
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-INSERT INTO abc VALUES (1);
-INSERT INTO abc VALUES (2);
-COMMIT AND CHAIN; -- TBLOCK_END
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-INSERT INTO abc VALUES ('error');
-INSERT INTO abc VALUES (3); -- check it's really aborted
-COMMIT AND CHAIN; -- TBLOCK_ABORT_END
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-INSERT INTO abc VALUES (4);
-COMMIT;
-
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-SAVEPOINT x;
-INSERT INTO abc VALUES ('error');
-
-COMMIT AND CHAIN; -- TBLOCK_ABORT_PENDING
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-INSERT INTO abc VALUES (5);
-COMMIT;
-
-START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE;
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-SAVEPOINT x;
-COMMIT AND CHAIN; -- TBLOCK_SUBCOMMIT
-SHOW transaction_isolation;
-SHOW transaction_read_only;
-SHOW transaction_deferrable;
-COMMIT;
-
--- not allowed outside a transaction block
-COMMIT AND CHAIN; -- error
-ROLLBACK AND CHAIN; -- error
-
-SELECT * FROM abc ORDER BY 1;
-
-RESET default_transaction_read_only;
-
-DROP TABLE abc;
-
--- Explicit transaction block will send Distributed Commit, even if there is
only SET command in it.
--- On the other hand, implicit transaction block involving only SET command
will not send it.
-create table tbl_dtx(a int, b int) distributed by (a);
-insert into tbl_dtx values(1,1);
-
-create or replace function dtx_set_bug()
- returns void
- language plpgsql
-as $function$
-begin
- execute 'update tbl_dtx set b = 1 where a = 1;';
- set optimizer=off;
-end;
-$function$;
-
-set Test_print_direct_dispatch_info = true;
-
--- 1. explicit BEGIN/END
-begin;
-set optimizer=false;
-end;
-
--- 2. implicit transaction block with just SET
-set optimizer=false;
-
--- 3. still implicit transaction block, but with UPDATE that will send DTX
protocol command to *some* QEs
--- due to direct dispatch. Planner needs to be used for direct dispatch here.
--- This is to verify that the QEs that are not involved in the UDPATE won't
receive DTX protocol command
--- that they are not supposed to see.
-select dtx_set_bug();
-
-reset Test_print_direct_dispatch_info;
diff --git a/contrib/pax_storage/src/test/regress/sql/gp_runtime_filter.sql
b/contrib/pax_storage/src/test/regress/sql/gp_runtime_filter.sql
index 628ab368592..685f15e3d59 100644
--- a/contrib/pax_storage/src/test/regress/sql/gp_runtime_filter.sql
+++ b/contrib/pax_storage/src/test/regress/sql/gp_runtime_filter.sql
@@ -79,156 +79,6 @@ SELECT COUNT(*) FROM fact_rf
SELECT COUNT(*) FROM dim_rf
WHERE dim_rf.did IN (SELECT did FROM fact_rf) AND proj_id < 2;
--- Test bloom filter pushdown
-SET enable_parallel TO off;
-
--- case 1: join on distribution table and replicated table.
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-CREATE TABLE t1(c1 int, c2 int, c3 int, c4 int, c5 int) with (appendonly=true,
orientation=column) distributed by (c1);
-CREATE TABLE t2(c1 int, c2 int, c3 int, c4 int, c5 int) with (appendonly=true,
orientation=column) distributed REPLICATED;
-
-INSERT INTO t1 VALUES (5,5,5,5,5);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-
-ANALYZE;
-
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT t1.c3 FROM t1, t2 WHERE t1.c2 = t2.c2;
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT t1.c3 FROM t1, t2 WHERE t1.c2 = t2.c2;
-
-RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-
--- case 2: join on partition table and replicated table.
-CREATE TABLE t1 (c1 INT, c2 INT) DISTRIBUTED BY (c1) PARTITION BY RANGE (c2)
(START (1) END (100) EVERY (50));
-CREATE TABLE t2 (c1 INT, c2 INT) DISTRIBUTED REPLICATED;
-INSERT INTO t1 SELECT generate_series(1, 99), generate_series(1, 99);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 SELECT generate_series(1, 5), generate_series(1, 5);
-INSERT INTO t2 SELECT generate_series(51, 51), generate_series(51, 51);
-ANALYZE;
-
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT * FROM t1, t2 WHERE t1.c2 = t2.c2;
-
-SET gp_enable_runtime_filter_pushdown TO on;
-
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
-SELECT * FROM t1, t2 WHERE t1.c2 = t2.c2;
-
-RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-
--- case 3: bug fix with explain
-DROP TABLE IF EXISTS test_tablesample1;
-CREATE TABLE test_tablesample1 (dist int, id int, name text) WITH
(fillfactor=10) DISTRIBUTED BY (dist);
-INSERT INTO test_tablesample1 SELECT 0, i, repeat(i::text, 875) FROM
generate_series(0, 9) s(i) ORDER BY i;
-INSERT INTO test_tablesample1 SELECT 3, i, repeat(i::text, 875) FROM
generate_series(10, 19) s(i) ORDER BY i;
-INSERT INTO test_tablesample1 SELECT 5, i, repeat(i::text, 875) FROM
generate_series(20, 29) s(i) ORDER BY i;
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (COSTS OFF) SELECT id FROM test_tablesample1 TABLESAMPLE SYSTEM (50)
REPEATABLE (2);
-RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS test_tablesample1;
-
--- case 4: show debug info only when gp_enable_runtime_filter_pushdown is on
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-CREATE TABLE t1(c1 int, c2 int);
-CREATE TABLE t2(c1 int, c2 int);
-INSERT INTO t1 SELECT GENERATE_SERIES(1, 1000), GENERATE_SERIES(1, 1000);
-INSERT INTO t2 SELECT * FROM t1;
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT count(t1.c2) FROM
t1, t2 WHERE t1.c1 = t2.c1;
-RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-
--- case 5: hashjoin + result + seqsacn
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-CREATE TABLE t1(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t2(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50));
-INSERT INTO t1 VALUES (5,5,5,5,5), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 select * FROM t2;
-ANALYZE;
-
--- MERGE16_FIXME: enable these tests after the fix of orca
--- SET optimizer TO on;
--- SET gp_enable_runtime_filter_pushdown TO off;
--- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1,
t2 WHERE t1.c1 = t2.c1;
---
--- SET gp_enable_runtime_filter_pushdown TO on;
--- EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1,
t2 WHERE t1.c1 = t2.c1;
---
--- RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-
--- case 6: hashjoin + hashjoin + seqscan
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
-CREATE TABLE t1(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t2(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-CREATE TABLE t3(c1 int, c2 int, c3 char(50), c4 char(50), c5 char(50))
DISTRIBUTED REPLICATED;
-INSERT INTO t1 VALUES (1,1,1,1,1), (2,2,2,2,2), (5,5,5,5,5);
-INSERT INTO t2 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t3 VALUES (1,1,1,1,1), (2,2,2,2,2), (3,3,3,3,3), (4,4,4,4,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t2 select * FROM t2;
-INSERT INTO t3 select * FROM t3;
-ANALYZE;
-
-SET optimizer TO off;
-SET gp_enable_runtime_filter_pushdown TO off;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM t1, t2, t3
WHERE t1.c1 = t2.c1 AND t1.c2 = t3.c2;
-
-SET gp_enable_runtime_filter_pushdown TO on;
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT * FROM t1, t2, t3
WHERE t1.c1 = t2.c1 AND t1.c2 = t3.c2;
-
-RESET gp_enable_runtime_filter_pushdown;
-
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
-
-RESET enable_parallel;
-- Clean up: reset guc
SET gp_enable_runtime_filter TO off;
diff --git a/contrib/pax_storage/src/test/regress/sql/gpcopy.sql
b/contrib/pax_storage/src/test/regress/sql/gpcopy.sql
index 50cafa49938..eb4e46dd269 100644
--- a/contrib/pax_storage/src/test/regress/sql/gpcopy.sql
+++ b/contrib/pax_storage/src/test/regress/sql/gpcopy.sql
@@ -1330,8 +1330,6 @@ DROP DATABASE IF EXISTS "funny_copy""db'with\\quotes";
reset client_min_messages;
CREATE DATABASE "funny_copy""db'with\\quotes";
-\! python3 test_dbconn.py 1
-
\c "funny_copy""db'with\\quotes"
-- echo will behave differently on different platforms, force to use bash with
-E option
COPY (SELECT 'data1') TO PROGRAM 'cat > /tmp/gpcopyenvtest; /usr/bin/env bash
-c ''echo -E database in COPY TO: $GP_DATABASE >> /tmp/gpcopyenvtest '' '
ESCAPE 'OFF';
diff --git a/contrib/pax_storage/src/test/regress/sql/pgstat_qd_tabstat.sql
b/contrib/pax_storage/src/test/regress/sql/pgstat_qd_tabstat.sql
index b93d9def221..746af4b86f4 100644
--- a/contrib/pax_storage/src/test/regress/sql/pgstat_qd_tabstat.sql
+++ b/contrib/pax_storage/src/test/regress/sql/pgstat_qd_tabstat.sql
@@ -38,7 +38,7 @@ select n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd,
n_live_tup, n_dead_tup, n
-- Test pgstat table stat in CTAS on QD
-create table table_for_ctas with (autovacuum_enabled=false) as select i,
'hello' || i from generate_series(1, 100) f(i);
+create table table_for_ctas as select i, 'hello' || i from generate_series(1,
100) f(i);
select gp_stat_force_next_flush();
select n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup,
n_mod_since_analyze from gp_stat_all_tables_summary where relid =
'table_for_ctas'::regclass;
select i, 'hello' || i into table_for_insert_into from generate_series(1, 100)
f(i);
diff --git a/contrib/pax_storage/src/test/regress/sql/resource_queue.sql
b/contrib/pax_storage/src/test/regress/sql/resource_queue.sql
index 7dc6611f932..e2b8d3089c9 100644
--- a/contrib/pax_storage/src/test/regress/sql/resource_queue.sql
+++ b/contrib/pax_storage/src/test/regress/sql/resource_queue.sql
@@ -415,23 +415,3 @@ SELECT * FROM rq_test_oosm_table;
DROP TABLE rq_test_oosm_table;
RESET ROLE;
DROP ROLE rq_test_oosm_role;
--- test for extended queries
--- create a role that only use in this test will drop it later
--- later we will use username to identify the backend process
-create role extend_protocol_requeue_role with login;
-
--- start_matchsubs
---
--- m/NOTICE: query requested \d+/
--- s/NOTICE: query requested \d+/NOTICE: query requested XXX/g
---
--- m/NOTICE: SPI memory reservation \d+/
--- s/NOTICE: SPI memory reservation \d+/NOTICE: SPI memory reservation /g
---
--- end_matchsubs
-
--- run query using non_superuser role so that it can be
--- controled by resource queue
-\! ./extended_protocol_resqueue dbname=regression extend_protocol_requeue_role;
-
-drop role extend_protocol_requeue_role;
diff --git a/contrib/pax_storage/src/test/regress/test_dbconn.py
b/contrib/pax_storage/src/test/regress/test_dbconn.py
new file mode 100644
index 00000000000..53eddd5e2cc
--- /dev/null
+++ b/contrib/pax_storage/src/test/regress/test_dbconn.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#-*- coding: utf-8 -*-
+
+# This is just to test that we can use dbconn to connect to a database
+# whose name contains special characters. This test needs a running
+# cluster, so it is not easy to cover under unittest. Previously,
+# I tried to create a UDF to test it under the regress tests; however,
+# importing dbconn pulls in dependency packages that import sys and
+# access argv, which is not allowed in plpython. So instead, I created
+# this python script in this directory; it is invoked via \! from the
+# regress/dispatch and regress/gpcopy tests.
+
+import sys
+from gppylib.db import dbconn
+
+dbnames = ['funny\"db\'with\\\\quotes', # from regress/dispatch
+ 'funny_copy\"db\'with\\\\quotes'# from regress/gpcopy
+]
+
+def test_connect_special_dbname(dbname):
+ url = dbconn.DbURL(dbname=dbname)
+ conn = dbconn.connect(url)
+ count = dbconn.querySingleton(conn, "select 1")
+ result = (count == 1)
+ conn.close()
+
+
+if __name__ == "__main__":
+ dbname = dbnames[int(sys.argv[1])]
+ test_connect_special_dbname(dbname)
diff --git a/src/include/utils/tarrable.h b/src/include/utils/tarrable.h
index 1540fa5a70b..704e0b42052 100644
--- a/src/include/utils/tarrable.h
+++ b/src/include/utils/tarrable.h
@@ -14,7 +14,7 @@
#ifndef TARRABLE_H
#define TARRABLE_H
-#define MAX_TARABLE_SYMLINK_PATH_LENGTH 100
+#define MAX_TARABLE_SYMLINK_PATH_LENGTH 200
#endif /* TARRABLE_H */
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]