This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new d3848806994 Fix some answer files for pax
d3848806994 is described below
commit d3848806994a0734cb105c0efeb98dba7d65a3b8
Author: Jinbao Chen <[email protected]>
AuthorDate: Sat Mar 28 00:08:12 2026 -0400
Fix some answer files for pax
---
.../src/test/regress/expected/agg_pushdown.out | 86 +++++++++++-----------
.../src/test/regress/expected/analyze.out | 8 +-
.../src/test/regress/expected/appendonly.out | 1 -
.../test/regress/expected/create_table_like_gp.out | 8 +-
.../src/test/regress/expected/cursor.out | 12 ++-
.../src/test/regress/expected/gp_array_agg.out | 2 +-
.../src/test/regress/expected/gp_dqa.out | 12 +--
.../src/test/regress/expected/gp_explain.out | 2 +-
.../src/test/regress/expected/gp_hashagg.out | 8 +-
.../src/test/regress/expected/gpdist_opclasses.out | 8 +-
.../test/regress/expected/incremental_analyze.out | 6 +-
.../src/test/regress/expected/join_gp.out | 21 +++---
.../src/test/regress/expected/motion_gp.out | 10 +--
.../src/test/regress/expected/subselect_gp.out | 2 +-
14 files changed, 92 insertions(+), 94 deletions(-)
diff --git a/contrib/pax_storage/src/test/regress/expected/agg_pushdown.out
b/contrib/pax_storage/src/test/regress/expected/agg_pushdown.out
index 8e6b8e478ba..3e3b045234c 100644
--- a/contrib/pax_storage/src/test/regress/expected/agg_pushdown.out
+++ b/contrib/pax_storage/src/test/regress/expected/agg_pushdown.out
@@ -89,9 +89,9 @@ AS c1 ON c1.parent = p.i GROUP BY p.i;
Output: c1.parent, PARTIAL avg(c1.v)
Group Key: c1.parent
-> Seq Scan on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent
-> Index Scan using agg_pushdown_parent_pkey on
public.agg_pushdown_parent p
- Output: p.i, p.x
+ Output: p.x, p.i
Index Cond: (p.i = c1.parent)
Settings: enable_hashjoin = 'off', enable_mergejoin = 'off', enable_nestloop
= 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
Optimizer: Postgres query optimizer
@@ -129,8 +129,7 @@ AS c1 ON c1.parent = p.i GROUP BY p.i;
Output: c1.parent, PARTIAL avg(c1.v)
Group Key: c1.parent
-> Seq Scan on
public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: c1.v, c1.parent
Optimizer: Postgres query optimizer
(27 rows)
@@ -167,8 +166,7 @@ AS c1 ON c1.parent = p.i GROUP BY p.i;
Output: c1.parent, PARTIAL avg(c1.v)
Group Key: c1.parent
-> Seq Scan on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
- Settings: enable_hashjoin = 'off', enable_mergejoin = 'on', enable_nestloop =
'off', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: c1.v, c1.parent
Optimizer: Postgres query optimizer
(28 rows)
@@ -232,7 +230,7 @@ AS c1 ON c1.parent = p.i GROUP BY p.i;
Output: c1.parent, PARTIAL avg(c1.v)
Group Key: c1.parent
-> Index Scan using
agg_pushdown_child1_parent_idx on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent
-> Index Only Scan using agg_pushdown_parent_pkey on
public.agg_pushdown_parent p
Output: p.i
Index Cond: (p.i = c1.parent)
@@ -268,7 +266,7 @@ AS c1 ON c1.parent = p.x GROUP BY p.i;
Output: p.i, c1.v
Hash Cond: (c1.parent = p.x)
-> Seq Scan on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent
-> Hash
Output: p.i, p.x
-> Broadcast Motion 3:3 (slice3; segments:
3)
@@ -309,8 +307,8 @@ AS c1 ON c1.parent = p.x GROUP BY p.i;
Output: c1.parent, PARTIAL avg(c1.v)
Group Key: c1.parent
-> Seq Scan on
public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'on', enable_nestloop =
'on', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: c1.v, c1.parent
+ Settings: optimizer = 'off', enable_nestloop = 'on', enable_hashjoin = 'on',
enable_mergejoin = 'on', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(27 rows)
@@ -341,9 +339,9 @@ c2.parent = p.i WHERE c1.j = c2.k GROUP BY p.i;
Output: c1.v, c1.parent, c2.v, c2.parent
Inner Unique: true
-> Seq Scan on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent, c1.j
-> Index Scan using agg_pushdown_child2_pkey on
public.agg_pushdown_child2 c2
- Output: c2.k, c2.parent, c2.v
+ Output: c2.v, c2.parent, c2.k
Index Cond: ((c2.k = c1.j) AND (c2.parent =
c1.parent))
-> Memoize
Output: p.i
@@ -384,9 +382,9 @@ c2.parent = p.i WHERE c1.j = c2.k GROUP BY p.i;
Output: c1.v, c1.parent, c2.v, c2.parent
Inner Unique: true
-> Seq Scan on
public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent, c1.j
-> Index Scan using
agg_pushdown_child2_pkey on public.agg_pushdown_child2 c2
- Output: c2.k, c2.parent, c2.v
+ Output: c2.v, c2.parent, c2.k
Index Cond: ((c2.k = c1.j) AND
(c2.parent = c1.parent))
-> Index Only Scan using agg_pushdown_parent_pkey on
public.agg_pushdown_parent p
Output: p.i
@@ -432,7 +430,7 @@ c2.parent = p.i WHERE c1.j = c2.k GROUP BY p.i;
Inner Unique: true
Hash Cond: ((c1.parent =
c2.parent) AND (c1.j = c2.k))
-> Seq Scan on
public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent,
c1.v
+ Output: c1.v, c1.parent,
c1.j
-> Hash
Output: c2.v, c2.parent,
c2.k
-> Seq Scan on
public.agg_pushdown_child2 c2
@@ -474,9 +472,9 @@ c2.parent = p.i WHERE c1.j = c2.k GROUP BY p.i;
Inner Unique: true
Merge Cond: ((c1.j = c2.k) AND
(c1.parent = c2.parent))
-> Index Scan using
agg_pushdown_child1_pkey on public.agg_pushdown_child1 c1
- Output: c1.j, c1.parent, c1.v
+ Output: c1.v, c1.parent, c1.j
-> Index Scan using
agg_pushdown_child2_pkey on public.agg_pushdown_child2 c2
- Output: c2.k, c2.parent, c2.v
+ Output: c2.v, c2.parent, c2.k
-> Index Only Scan using agg_pushdown_parent_pkey on
public.agg_pushdown_parent p
Output: p.i
Settings: enable_hashjoin = 'off', enable_mergejoin = 'on', enable_nestloop =
'off', enable_seqscan = 'off', gp_enable_agg_pushdown = 'on', optimizer = 'off'
@@ -516,15 +514,15 @@ SELECT t1.id, SUM(t1.val) FROM t1, t2 WHERE t1.id = t2.id
GROUP BY t1.id;
Output: t1.id, (PARTIAL sum(t1.val))
Hash Cond: (t2.id = t1.id)
-> Seq Scan on public.t2
- Output: t2.id, t2.val
+ Output: t2.id
-> Hash
Output: t1.id, (PARTIAL sum(t1.val))
-> Partial HashAggregate
Output: t1.id, PARTIAL sum(t1.val)
Group Key: t1.id
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.id, t1.val
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(19 rows)
@@ -545,15 +543,15 @@ SELECT t1.val, SUM(t1.id) FROM t1, t2 WHERE t1.id = t2.id
GROUP BY t1.val;
Output: t1.val, (PARTIAL sum(t1.id))
Hash Cond: (t2.id = t1.id)
-> Seq Scan on public.t2
- Output: t2.id, t2.val
+ Output: t2.id
-> Hash
Output: t1.val, t1.id, (PARTIAL sum(t1.id))
-> Partial HashAggregate
Output: t1.val, t1.id, PARTIAL sum(t1.id)
Group Key: t1.val, t1.id
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.val, t1.id
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(22 rows)
@@ -571,15 +569,15 @@ SELECT t2.id, SUM(t1.val) FROM t1, t2 WHERE t1.id = t2.id
GROUP BY t2.id;
Output: t2.id, (PARTIAL sum(t1.val))
Hash Cond: (t2.id = t1.id)
-> Seq Scan on public.t2
- Output: t2.id, t2.val
+ Output: t2.id
-> Hash
Output: t1.id, (PARTIAL sum(t1.val))
-> Partial HashAggregate
Output: t1.id, PARTIAL sum(t1.val)
Group Key: t1.id
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.val, t1.id
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(19 rows)
@@ -600,15 +598,15 @@ SELECT t2.val, SUM(t1.val) FROM t1, t2 WHERE t1.id =
t2.id GROUP BY t2.val;
Output: t2.val, (PARTIAL sum(t1.val))
Hash Cond: (t2.id = t1.id)
-> Seq Scan on public.t2
- Output: t2.id, t2.val
+ Output: t2.val, t2.id
-> Hash
Output: t1.id, (PARTIAL sum(t1.val))
-> Partial HashAggregate
Output: t1.id, PARTIAL sum(t1.val)
Group Key: t1.id
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.val, t1.id
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(22 rows)
@@ -626,15 +624,15 @@ SELECT t1.id, t1.comment, SUM(t1.val) FROM t1, t2 WHERE
t1.id = t2.id GROUP BY t
Output: t1.id, t1.comment, (PARTIAL sum(t1.val))
Hash Cond: (t2.id = t1.id)
-> Seq Scan on public.t2
- Output: t2.id, t2.val
+ Output: t2.id
-> Hash
Output: t1.id, t1.comment, (PARTIAL sum(t1.val))
-> Partial HashAggregate
Output: t1.id, t1.comment, PARTIAL sum(t1.val)
Group Key: t1.id, t1.comment
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.id, t1.comment, t1.val
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(19 rows)
@@ -662,8 +660,8 @@ SELECT t1.id, t1.comment, SUM(t1.val) FROM t1, t2 WHERE
t1.id = t2.id and t1.val
Output: t1.id, t1.comment, t1.val, PARTIAL
sum(t1.val)
Group Key: t1.id, t1.comment, t1.val
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.id, t1.comment, t1.val
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(22 rows)
@@ -721,8 +719,8 @@ SELECT t1.id, t1.comment, SUM(t1.val) FROM t1, t2 WHERE
t1.id = t2.id and t1.val
Output: t1.id, t1.comment, t1.val, PARTIAL
sum(t1.val)
Group Key: t1.id, t1.comment, t1.val
-> Seq Scan on public.t1
- Output: t1.id, t1.val, t1.comment
- Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
+ Output: t1.id, t1.comment, t1.val
+ Settings: optimizer = 'off', enable_nestloop = 'off', enable_hashjoin = 'on',
enable_mergejoin = 'off', gp_enable_agg_pushdown = 'on', enable_seqscan = 'on'
Optimizer: Postgres query optimizer
(22 rows)
@@ -730,8 +728,6 @@ SELECT t1.id, t1.comment, SUM(t1.val) FROM t1, t2 WHERE
t1.id = t2.id and t1.val
DROP TABLE t1, t2;
-- Test case group 3: Pushdown in subquery and group from subquery.
DROP TABLE IF EXISTS part, lineitem;
-NOTICE: table "part" does not exist, skipping
-NOTICE: table "lineitem" does not exist, skipping
CREATE TABLE part (p_partkey int, p_size int, p_price int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named
'p_partkey' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make
sure column(s) chosen are the optimal data distribution key to minimize skew.
@@ -778,7 +774,7 @@ SELECT SUM(slp) FROM
Output: lineitem.l_partkey,
(PARTIAL sum(part.p_price))
Hash Cond:
(lineitem.l_partkey = part.p_partkey)
-> Seq Scan on
public.lineitem
- Output:
lineitem.l_orderkey, lineitem.l_partkey, lineitem.l_amount
+ Output:
lineitem.l_partkey
-> Hash
Output:
part.p_partkey, (PARTIAL sum(part.p_price))
-> Broadcast Motion
3:3 (slice3; segments: 3)
@@ -787,7 +783,7 @@ SELECT SUM(slp) FROM
Output:
part.p_partkey, PARTIAL sum(part.p_price)
Group
Key: part.p_partkey
-> Seq
Scan on public.part
-
Output: part.p_partkey, part.p_size, part.p_price
+
Output: part.p_price, part.p_partkey
Filter: (part.p_size < 40)
Settings: enable_hashjoin = 'on', enable_mergejoin = 'off', enable_nestloop =
'off', enable_seqscan = 'on', gp_enable_agg_pushdown = 'on', optimizer = 'off'
Optimizer: Postgres query optimizer
@@ -811,7 +807,7 @@ SELECT p_partkey, SUM(l_amount) FROM
Output: part.p_partkey, (PARTIAL sum(((lineitem.l_amount +
10))))
Hash Cond: (part.p_partkey = lineitem.l_partkey)
-> Seq Scan on public.part
- Output: part.p_partkey, part.p_size, part.p_price
+ Output: part.p_partkey
-> Hash
Output: lineitem.l_partkey, (PARTIAL
sum(((lineitem.l_amount + 10))))
-> Redistribute Motion 1:3 (slice2; segments: 1)
@@ -884,7 +880,7 @@ SELECT v_id, v_name, SUM(c_consumption)
Hash Cond: (customer_pd.c_n_id =
nation_pd.n_id)
Join Filter: (customer_pd.c_id >
nation_pd.n_population)
-> Seq Scan on public.customer_pd
- Output: customer_pd.c_id,
customer_pd.c_v_id, customer_pd.c_n_id, customer_pd.c_type,
customer_pd.c_consumption
+ Output:
customer_pd.c_consumption, customer_pd.c_v_id, customer_pd.c_n_id,
customer_pd.c_id
-> Hash
Output: nation_pd.n_id,
nation_pd.n_population
-> Broadcast Motion 3:3
(slice3; segments: 3)
@@ -926,7 +922,7 @@ SELECT v_id, c_type, n_type, SUM(c_consumption)
Hash Cond: (customer_pd.c_n_id =
nation_pd.n_id)
Join Filter: (customer_pd.c_id >
nation_pd.n_population)
-> Seq Scan on public.customer_pd
- Output: customer_pd.c_id,
customer_pd.c_v_id, customer_pd.c_n_id, customer_pd.c_type,
customer_pd.c_consumption
+ Output: customer_pd.c_type,
customer_pd.c_consumption, customer_pd.c_v_id, customer_pd.c_n_id,
customer_pd.c_id
-> Hash
Output: nation_pd.n_type,
nation_pd.n_id, nation_pd.n_population
-> Broadcast Motion 3:3
(slice3; segments: 3)
@@ -967,7 +963,7 @@ SELECT v_id, v_name, n_type, SUM(c_consumption)
Output: customer_pd.c_consumption,
customer_pd.c_v_id, nation_pd.n_type
Hash Cond: (customer_pd.c_n_id =
nation_pd.n_id)
-> Seq Scan on public.customer_pd
- Output: customer_pd.c_id,
customer_pd.c_v_id, customer_pd.c_n_id, customer_pd.c_type,
customer_pd.c_consumption
+ Output:
customer_pd.c_consumption, customer_pd.c_v_id, customer_pd.c_n_id
-> Hash
Output: nation_pd.n_type,
nation_pd.n_id
-> Broadcast Motion 3:3
(slice3; segments: 3)
@@ -1203,7 +1199,7 @@ SELECT t1.c, sum(t1.a)
Output: t1.c, PARTIAL sum(t1.a)
Group Key: t1.c
-> Seq Scan on public.pagg_pd t1
- Output: t1.a, t1.b, t1.c,
t1.d
+ Output: t1.c, t1.a
-> Materialize
Output: t2.c
-> Seq Scan on public.pagg_pd t2
diff --git a/contrib/pax_storage/src/test/regress/expected/analyze.out
b/contrib/pax_storage/src/test/regress/expected/analyze.out
index 294cc4daa16..8b462f8f0ee 100644
--- a/contrib/pax_storage/src/test/regress/expected/analyze.out
+++ b/contrib/pax_storage/src/test/regress/expected/analyze.out
@@ -842,7 +842,7 @@ ANALYZE foo_stats;
SELECT schemaname, tablename, attname, null_frac, avg_width, n_distinct,
most_common_vals, most_common_freqs, histogram_bounds FROM pg_stats WHERE
tablename='foo_stats' ORDER BY attname;
schemaname | tablename | attname | null_frac | avg_width | n_distinct |
most_common_vals | most_common_freqs | histogram_bounds
------------+-----------+---------+-----------+-----------+------------+---------------------+-------------------+------------------
- public | foo_stats | a | 0 | 1156 | 0 |
| |
+ public | foo_stats | a | 0 | 100004 | 0 |
| |
public | foo_stats | b | 0 | 7 | -0.5 |
{"\\x626262626232"} | {1} |
public | foo_stats | c | 0 | 6 | -0.5 |
{cccc2} | {1} |
public | foo_stats | d | 0 | 4 | -0.5 | {3}
| {1} |
@@ -862,7 +862,7 @@ analyze twoseg_table;
select relname, reltuples, relpages from pg_class where relname
='twoseg_table' order by relname;
relname | reltuples | relpages
--------------+-----------+----------
- twoseg_table | 50 | 2
+ twoseg_table | 50 | 1
(1 row)
select attname, null_frac, avg_width, n_distinct, most_common_vals,
most_common_freqs, histogram_bounds FROM pg_stats WHERE
tablename='twoseg_table' ORDER BY attname;
@@ -982,13 +982,13 @@ SELECT relpages, reltuples FROM pg_class WHERE relname =
'ana_parent';
SELECT relpages, reltuples FROM pg_class WHERE relname = 'ana_c1';
relpages | reltuples
----------+-----------
- 3 | 10
+ 1 | 10
(1 row)
SELECT relpages, reltuples FROM pg_class WHERE relname = 'ana_c2';
relpages | reltuples
----------+-----------
- 3 | 11
+ 1 | 11
(1 row)
-- Check pg_stats entries
diff --git a/contrib/pax_storage/src/test/regress/expected/appendonly.out
b/contrib/pax_storage/src/test/regress/expected/appendonly.out
index a8d2775798b..1563902583a 100644
--- a/contrib/pax_storage/src/test/regress/expected/appendonly.out
+++ b/contrib/pax_storage/src/test/regress/expected/appendonly.out
@@ -50,7 +50,6 @@ CREATE TABLE tenk_ao9 (like tenk_heap) with (appendonly=true,
compresslevel=0, c
ERROR: compresstype "zlib" can't be used with compresslevel 0
-- these should not work without appendonly=true
CREATE TABLE tenk_ao10 (like tenk_heap) with (compresslevel=5);
-ERROR: unrecognized parameter "compresslevel"
CREATE TABLE tenk_ao11 (like tenk_heap) with (blocksize=8192);
ERROR: unrecognized parameter "blocksize"
CREATE TABLE tenk_ao12 (like tenk_heap) with (appendonly=false,blocksize=8192);
diff --git
a/contrib/pax_storage/src/test/regress/expected/create_table_like_gp.out
b/contrib/pax_storage/src/test/regress/expected/create_table_like_gp.out
index 8cb92fbced0..d4749da9857 100644
--- a/contrib/pax_storage/src/test/regress/expected/create_table_like_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/create_table_like_gp.out
@@ -34,10 +34,10 @@ WHERE
------------+-----------+-------------+--------------+---------------
t_ao | ao_column | t | | 0
t_ao_enc | ao_column | t | | 0
- t_ao_a | heap | | |
- t_ao_b | heap | | |
- t_ao_c | heap | | |
- t_ao_enc_a | heap | | |
+ t_ao_a | pax | | |
+ t_ao_b | pax | | |
+ t_ao_c | pax | | |
+ t_ao_enc_a | pax | | |
t_ao_d | ao_row | f | | 0
(7 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/cursor.out
b/contrib/pax_storage/src/test/regress/expected/cursor.out
index 9506aa9a8f0..538026abe00 100644
--- a/contrib/pax_storage/src/test/regress/expected/cursor.out
+++ b/contrib/pax_storage/src/test/regress/expected/cursor.out
@@ -690,17 +690,15 @@ END;
$BODY$
LANGUAGE 'plpgsql';
SELECT func_test_cursor();
- func_test_cursor
-------------------
-
-(1 row)
-
+ERROR: "cursor_initplan" is not simply updatable
+CONTEXT: SQL statement "UPDATE cursor_initplan SET b = var1.b + 1 WHERE
CURRENT OF cur"
+PL/pgSQL function func_test_cursor() line 9 at SQL statement
SELECT * FROM cursor_initplan ORDER BY a;
a | b
----+----
1 | 1
- 2 | 3
- 3 | 4
+ 2 | 2
+ 3 | 3
4 | 4
5 | 5
6 | 6
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_array_agg.out
b/contrib/pax_storage/src/test/regress/expected/gp_array_agg.out
index 4ae1f4b0b7d..26495290457 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_array_agg.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_array_agg.out
@@ -309,7 +309,7 @@ explain (verbose, costs off) select
array_dims(gp_array_agg(distinct arr)) from
Output: arr
Group Key: int_array_table.arr
-> Seq Scan on
test_gp_array_agg.int_array_table
- Output: a, arr
+ Output: arr
Optimizer: Postgres query optimizer
Settings: optimizer=off
(19 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_dqa.out
b/contrib/pax_storage/src/test/regress/expected/gp_dqa.out
index be2bcf591c3..baa3576d9f8 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_dqa.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_dqa.out
@@ -2227,7 +2227,8 @@ explain(verbose on, costs off) select count(distinct a),
count(distinct d), c fr
Split by Col: (dqa_unique.a),
(dqa_unique.d)
Group Key: dqa_unique.a, dqa_unique.b
-> Seq Scan on public.dqa_unique
- Output: a, b, c, d
+ Output: a, b, d, c
+ Settings: gp_motion_cost_per_row = '2', enable_hashagg = 'on',
enable_groupagg = 'off'
Optimizer: Postgres query optimizer
Settings: enable_groupagg = 'off', gp_motion_cost_per_row = '1'
(25 rows)
@@ -2642,9 +2643,9 @@ explain (verbose on, costs off) select count(distinct b)
from num_table group by
Output: c, b
Group Key: num_table.c, num_table.b
-> Seq Scan on public.num_table
- Output: id, a, b, c
- Optimizer: Postgres-based planner
- Settings: enable_groupagg = 'off', gp_motion_cost_per_row = '1', optimizer =
'off', optimizer_force_multistage_agg = 'on'
+ Output: c, b
+ Settings: gp_motion_cost_per_row = '2', enable_hashagg = 'on',
enable_groupagg = 'off', optimizer_force_multistage_agg = 'on'
+ Optimizer: Postgres query optimizer
(18 rows)
select count(distinct b) from num_table group by c;
@@ -3054,7 +3055,8 @@ explain (verbose on, costs off) select count(distinct a),
count(distinct b) from
Split by Col: (dqa_f4.a), (dqa_f4.b)
Group Key: dqa_f4.c
-> Seq Scan on public.dqa_f4
- Output: a, b, c
+ Output: c, a, b
+ Settings: gp_motion_cost_per_row = '2', enable_hashagg = 'on',
enable_groupagg = 'off'
Optimizer: Postgres query optimizer
Settings: enable_groupagg = 'off', gp_motion_cost_per_row = '1', optimizer =
'off', optimizer_enable_multiple_distinct_aggs = 'on'
(25 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_explain.out
b/contrib/pax_storage/src/test/regress/expected/gp_explain.out
index 6e4ad00a41f..09d0b0be2c7 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_explain.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_explain.out
@@ -600,7 +600,7 @@ SELECT * FROM y LIMIT 10;
-> WorkTable Scan on y (never executed)
-> Materialize (never executed)
-> Gather Motion 3:1 (slice1; segments: 3) (never
executed)
- -> Seq Scan on recursive_table_ic (actual
rows=4061 loops=1)
+ -> Seq Scan on recursive_table_ic (actual
rows=6090 loops=1)
Optimizer: Postgres query optimizer
(13 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/gp_hashagg.out
b/contrib/pax_storage/src/test/regress/expected/gp_hashagg.out
index 38a293a684b..79f917d6d12 100644
--- a/contrib/pax_storage/src/test/regress/expected/gp_hashagg.out
+++ b/contrib/pax_storage/src/test/regress/expected/gp_hashagg.out
@@ -158,7 +158,7 @@ EXPLAIN (COSTS OFF, VERBOSE) :qry;
Output: b, PARTIAL sum(a) FILTER (WHERE false), PARTIAL
max(c)
Group Key: test_combinefn_null.b
-> Seq Scan on public.test_combinefn_null
- Output: a, b, c
+ Output: b, a, c
Optimizer: Postgres query optimizer
Settings: enable_indexscan=off, enable_seqscan=on, enable_sort=off,
optimizer=off
(13 rows)
@@ -193,7 +193,7 @@ EXPLAIN (COSTS OFF, VERBOSE) :qry;
Output: b, PARTIAL var_pop((a)::integer) FILTER (WHERE
false), PARTIAL max(c)
Group Key: test_combinefn_null.b
-> Seq Scan on public.test_combinefn_null
- Output: a, b, c
+ Output: b, a, c
Optimizer: Postgres query optimizer
Settings: enable_indexscan=off, enable_seqscan=on, enable_sort=off,
optimizer=off
(13 rows)
@@ -228,7 +228,7 @@ EXPLAIN (COSTS OFF, VERBOSE) :qry;
Output: b, PARTIAL sum((a)::numeric) FILTER (WHERE
false), PARTIAL max(c)
Group Key: test_combinefn_null.b
-> Seq Scan on public.test_combinefn_null
- Output: a, b, c
+ Output: b, a, c
Optimizer: Postgres query optimizer
Settings: enable_indexscan=off, enable_seqscan=on, enable_sort=off,
optimizer=off
(13 rows)
@@ -263,7 +263,7 @@ EXPLAIN (COSTS OFF, VERBOSE) :qry;
Output: b, PARTIAL var_pop((a)::numeric) FILTER (WHERE
false), PARTIAL max(c)
Group Key: test_combinefn_null.b
-> Seq Scan on public.test_combinefn_null
- Output: a, b, c
+ Output: b, a, c
Optimizer: Postgres query optimizer
Settings: enable_indexscan=off, enable_seqscan=on, enable_sort=off,
optimizer=off
(13 rows)
diff --git a/contrib/pax_storage/src/test/regress/expected/gpdist_opclasses.out
b/contrib/pax_storage/src/test/regress/expected/gpdist_opclasses.out
index 0b72755b842..3cd1e3762fc 100644
--- a/contrib/pax_storage/src/test/regress/expected/gpdist_opclasses.out
+++ b/contrib/pax_storage/src/test/regress/expected/gpdist_opclasses.out
@@ -241,18 +241,20 @@ DETAIL: Distribution key column "ip_addr" is not
included in the constraint.
HINT: Add "ip_addr" to the constraint with the =(inet,inet) operator.
-- but this is.
ALTER TABLE ip_reservations ADD EXCLUDE USING gist (ip_addr inet_ops WITH =,
reserved WITH &&);
+ERROR: pax only support btree/hash/gin/bitmap indexes
(pax_access_handle.cc:591)
-- new distribution is incompatible with the constraint.
ALTER TABLE ip_reservations SET DISTRIBUTED BY (reserved);
-ERROR: distribution policy is not compatible with exclusion constraint
"ip_reservations_ip_addr_reserved_excl"
-DETAIL: Distribution key column "reserved" is not included in the constraint.
-HINT: Add "reserved" to the constraint with the =(anyrange,anyrange) operator.
-- After dropping the constraint, it's allowed.
ALTER TABLE ip_reservations DROP CONSTRAINT
ip_reservations_ip_addr_reserved_excl;
+ERROR: constraint "ip_reservations_ip_addr_reserved_excl" of relation
"ip_reservations" does not exist
ALTER TABLE ip_reservations SET DISTRIBUTED BY (reserved);
+WARNING: distribution policy of relation "ip_reservations" already set to
(reserved)
+HINT: Use ALTER TABLE "ip_reservations" SET WITH (REORGANIZE=TRUE)
DISTRIBUTED BY (reserved) to force redistribution
-- Test creating exclusion constraint on tsrange column. (The subtle
-- difference is there is no direct =(tsrange, tsrange) operator, we rely on
-- the implicit casts for it)
ALTER TABLE ip_reservations ADD EXCLUDE USING gist (reserved WITH =);
+ERROR: pax only support btree/hash/gin/bitmap indexes
(pax_access_handle.cc:591)
--
-- Test scenario, where a type has a hash operator class, but not a default
-- one.
diff --git
a/contrib/pax_storage/src/test/regress/expected/incremental_analyze.out
b/contrib/pax_storage/src/test/regress/expected/incremental_analyze.out
index fd91f7442b8..fd01b625034 100644
--- a/contrib/pax_storage/src/test/regress/expected/incremental_analyze.out
+++ b/contrib/pax_storage/src/test/regress/expected/incremental_analyze.out
@@ -1537,9 +1537,9 @@ ANALYZE foo;
SELECT * FROM pg_stats WHERE tablename like 'foo%' and attname = 'c' ORDER BY
attname,tablename;
schemaname | tablename | attname | inherited | null_frac | avg_width |
n_distinct | most_common_vals | most_common_freqs | histogram_bounds |
correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
------------+-------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+----------------------
- public | foo | c | t | 0 | 4591 |
0 | | | | |
| |
- public | foo_1_prt_1 | c | f | 0 | 4591 |
0 | | | | |
| |
- public | foo_1_prt_2 | c | f | 0 | 4591 |
0 | | | | |
| |
+ public | foo | c | t | 0 | 400004 |
0 | | | | |
| |
+ public | foo_1_prt_1 | c | f | 0 | 400004 |
0 | | | | |
| |
+ public | foo_1_prt_2 | c | f | 0 | 400004 |
0 | | | | |
| |
(3 rows)
-- Test ANALYZE full scan HLL
diff --git a/contrib/pax_storage/src/test/regress/expected/join_gp.out
b/contrib/pax_storage/src/test/regress/expected/join_gp.out
index e9317d8e277..fc05f8eb6fb 100644
--- a/contrib/pax_storage/src/test/regress/expected/join_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/join_gp.out
@@ -847,16 +847,16 @@ set enable_material = 0;
set enable_seqscan = 0;
set enable_bitmapscan = 0;
explain select tenk1.unique2 >= 0 from tenk1 left join tenk2 on true limit 1;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------
- Limit (cost=0.32..0.38 rows=1 width=4)
- -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.32..0.38 rows=1
width=4)
- -> Limit (cost=0.32..0.36 rows=1 width=4)
- -> Nested Loop Left Join (cost=0.32..3520104.27 rows=33333334
width=4)
- -> Index Only Scan using tenk1_unique2 on tenk1
(cost=0.16..1650.16 rows=3334 width=4)
- -> Materialize (cost=0.16..18479.11 rows=10000 width=0)
- -> Broadcast Motion 3:3 (slice2; segments: 3)
(cost=0.16..18329.11 rows=10000 width=0)
- -> Index Only Scan using tenk2_hundred on
tenk2 (cost=0.16..223.47 rows=3333 width=0)
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
+ Limit (cost=0.32..0.36 rows=1 width=1)
+ -> Gather Motion 3:1 (slice1; segments: 3) (cost=0.32..0.44 rows=3
width=1)
+ -> Limit (cost=0.32..0.40 rows=1 width=1)
+ -> Nested Loop Left Join (cost=0.32..2507776.74 rows=33333333
width=1)
+ -> Index Only Scan using tenk1_unique2 on tenk1
(cost=0.16..493.85 rows=3333 width=4)
+ -> Materialize (cost=0.16..677.18 rows=10000 width=0)
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
(cost=0.16..627.18 rows=10000 width=0)
+ -> Index Only Scan using tenk2_unique2 on
tenk2 (cost=0.16..493.85 rows=3333 width=0)
Optimizer: Postgres query optimizer
(9 rows)
@@ -1427,6 +1427,7 @@ select box(point(0.05*i, 0.05*i), point(0.05*i, 0.05*i)),
from generate_series(0,10000) as i;
vacuum analyze gist_tbl_github9733;
create index gist_tbl_point_index_github9733 on gist_tbl_github9733 using gist
(p);
+ERROR: pax only support btree/hash/gin/bitmap indexes
(pax_access_handle.cc:591)
set enable_seqscan=off;
set enable_bitmapscan=off;
explain (costs off)
diff --git a/contrib/pax_storage/src/test/regress/expected/motion_gp.out
b/contrib/pax_storage/src/test/regress/expected/motion_gp.out
index b9a6570f0fc..36b3178546b 100644
--- a/contrib/pax_storage/src/test/regress/expected/motion_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/motion_gp.out
@@ -33,12 +33,12 @@ SELECT id,
FROM motiondata;
id | plain_sz | main_sz | external_sz | extended_sz
----+----------+---------+-------------+-------------
- 1 | 4 | 4 | 4 | 7
2 | 10004 | | |
- 3 | | 135 | |
- 4 | | 251 | |
- 5 | | | 10000 |
- 6 | | | | 11463
+ 3 | | 10004 | |
+ 4 | | 20004 | |
+ 1 | 7 | 7 | 7 | 10
+ 5 | | | 10004 |
+ 6 | | | | 1000004
(6 rows)
CREATE TABLE motiondata_ao WITH (appendonly=true) AS SELECT * from motiondata;
diff --git a/contrib/pax_storage/src/test/regress/expected/subselect_gp.out
b/contrib/pax_storage/src/test/regress/expected/subselect_gp.out
index 3e4ea0d16c9..8a2863ae734 100644
--- a/contrib/pax_storage/src/test/regress/expected/subselect_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/subselect_gp.out
@@ -2650,7 +2650,7 @@ select * from foo where
Output: foo.i, foo.j, (RowIdExpr)
Hash Cond: (b.i = CASE WHEN ((hashed SubPlan 1)) THEN
foo.i ELSE NULL::integer END)
-> Seq Scan on subselect_gp.baz b
- Output: b.i, b.j
+ Output: b.i
-> Hash
Output: foo.i, foo.j, ((hashed SubPlan 1)),
(RowIdExpr)
-> Redistribute Motion 3:3 (slice3; segments: 3)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]