This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this 
push:
     new 12174a9bcdb Fix some answer files for pax
12174a9bcdb is described below

commit 12174a9bcdb931f7f34615c3522a94ea209399e4
Author: Jinbao Chen <[email protected]>
AuthorDate: Wed Mar 25 23:54:17 2026 -0400

    Fix some answer files for pax
---
 .../src/test/regress/expected/btree_index.out      | 147 +++++++++++----------
 .../pax_storage/src/test/regress/expected/gin.out  |   9 +-
 .../src/test/regress/expected/groupingsets.out     |   6 +-
 .../src/test/regress/expected/rangefuncs.out       |  14 +-
 .../src/test/regress/expected/rowsecurity.out      |  21 +--
 .../src/test/regress/expected/tablesample.out      | 146 ++++++++++----------
 .../src/test/regress/expected/tsearch.out          |  58 ++++----
 .../src/test/regress/expected/update.out           |   1 +
 .../src/test/regress/expected/update_gp.out        |  64 +++++----
 .../src/test/regress/sql/privileges.sql            |   1 -
 .../pax_storage/src/test/regress/sql/tsearch.sql   |   1 +
 11 files changed, 217 insertions(+), 251 deletions(-)

diff --git a/contrib/pax_storage/src/test/regress/expected/btree_index.out 
b/contrib/pax_storage/src/test/regress/expected/btree_index.out
index 347c673f9f3..ae3b6d95554 100644
--- a/contrib/pax_storage/src/test/regress/expected/btree_index.out
+++ b/contrib/pax_storage/src/test/regress/expected/btree_index.out
@@ -1,5 +1,58 @@
 --
 -- BTREE_INDEX
+--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+CREATE TABLE bt_i4_heap (
+                            seqno              int4,
+                            random             int4
+);
+NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 
'seqno' as the Apache Cloudberry data distribution key for this table.
+HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make 
sure column(s) chosen are the optimal data distribution key to minimize skew.
+CREATE TABLE bt_name_heap (
+                              seqno            name,
+                              random           int4
+);
+NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 
'seqno' as the Apache Cloudberry data distribution key for this table.
+HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make 
sure column(s) chosen are the optimal data distribution key to minimize skew.
+CREATE TABLE bt_txt_heap (
+                             seqno             text,
+                             random            int4
+);
+NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 
'seqno' as the Apache Cloudberry data distribution key for this table.
+HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make 
sure column(s) chosen are the optimal data distribution key to minimize skew.
+CREATE TABLE bt_f8_heap (
+                            seqno              float8,
+                            random             int4
+);
+NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 
'seqno' as the Apache Cloudberry data distribution key for this table.
+HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make 
sure column(s) chosen are the optimal data distribution key to minimize skew.
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_i4_heap FROM :'filename';
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_name_heap FROM :'filename';
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_txt_heap FROM :'filename';
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_f8_heap FROM :'filename';
+ANALYZE bt_i4_heap;
+ANALYZE bt_name_heap;
+ANALYZE bt_txt_heap;
+ANALYZE bt_f8_heap;
+--
+-- BTREE ascending/descending cases
+--
+-- we load int4/text from pure descending data (each key is a new
+-- low key) and name/f8 from pure ascending data (each key is a new
+-- high key).  we had a bug where new low keys would sometimes be
+-- "lost".
+--
+CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
+CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
+CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
+CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
+--
+-- BTREE_INDEX
 -- test retrieval of min/max keys for each index
 --
 SELECT b.*
@@ -206,7 +259,7 @@ reset enable_sort;
 -- Also check LIKE optimization with binary-compatible cases
 create temp table btree_bpchar (f1 text collate "C");
 create index on btree_bpchar(f1 bpchar_ops) WITH (deduplicate_items=on);
-insert into btree_bpchar values ('foo'), ('foo  '), ('fool'), ('bar'), 
('quux');
+insert into btree_bpchar values ('foo'), ('fool'), ('bar'), ('quux');
 -- doesn't match index:
 explain (costs off)
 select * from btree_bpchar where f1 like 'foo';
@@ -233,12 +286,11 @@ select * from btree_bpchar where f1 like 'foo%';
 (3 rows)
 
 select * from btree_bpchar where f1 like 'foo%';
-  f1   
--------
- foo  
+  f1  
+------
  foo
  fool
-(3 rows)
+(2 rows)
 
 -- these do match the index:
 explain (costs off)
@@ -267,33 +319,14 @@ select * from btree_bpchar where f1::bpchar like 'foo%';
          Filter: ((f1)::bpchar ~~ 'foo%'::text)
          ->  Bitmap Index Scan on btree_bpchar_f1_idx
                Index Cond: (((f1)::bpchar >= 'foo'::bpchar) AND ((f1)::bpchar 
< 'fop'::bpchar))
-(5 rows)
-
-select * from btree_bpchar where f1::bpchar like 'foo%';
-  f1   
--------
- foo
- fool
- foo  
-(3 rows)
-
-explain (costs off)
-select * from btree_bpchar where f1::bpchar ='foo';
-                        QUERY PLAN                        
-----------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)
-   ->  Bitmap Heap Scan on btree_bpchar
-         Recheck Cond: ((f1)::bpchar = 'foo'::bpchar)
-         ->  Bitmap Index Scan on btree_bpchar_f1_idx
-               Index Cond: ((f1)::bpchar = 'foo'::bpchar)
  Optimizer: Postgres query optimizer
 (6 rows)
 
-select * from btree_bpchar where f1::bpchar ='foo';
-  f1   
--------
- foo  
+select * from btree_bpchar where f1::bpchar like 'foo%';
+  f1  
+------
  foo
+ fool
 (2 rows)
 
 -- get test coverage for "single value" deduplication strategy:
@@ -301,7 +334,7 @@ insert into btree_bpchar select 'foo' from 
generate_series(1,1500);
 --
 -- Perform unique checking, with and without the use of deduplication
 --
-CREATE TABLE dedup_unique_test_table (a int) WITH (autovacuum_enabled=false);
+CREATE TABLE dedup_unique_test_table (a int);
 CREATE UNIQUE INDEX dedup_unique ON dedup_unique_test_table (a) WITH 
(deduplicate_items=on);
 CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table (a) WITH 
(deduplicate_items=off);
 -- Generate enough garbage tuples in index to ensure that even the unique index
@@ -309,7 +342,7 @@ CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table 
(a) WITH (deduplicat
 -- checking (at least with a BLCKSZ of 8192 or less)
 DO $$
 BEGIN
-    FOR r IN 1..1350 LOOP
+    FOR r IN 1..50 LOOP
         DELETE FROM dedup_unique_test_table;
         INSERT INTO dedup_unique_test_table SELECT 1;
     END LOOP;
@@ -355,7 +388,8 @@ VACUUM delete_test_table;
 --
 -- The vacuum above should've turned the leaf page into a fast root. We just
 -- need to insert some rows to cause the fast root page to split.
-INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,1000) i;
+-- Pax does not support IndexDeleteTuples
+-- INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM 
generate_series(1,1000) i;
 --
 -- GPDB: Test correctness of B-tree stats in consecutively VACUUM.
 --
@@ -383,8 +417,9 @@ SELECT reltuples FROM pg_class WHERE 
relname='btree_stats_idx';
          0
 (1 row)
 
--- 1st VACUUM, expect reltuples = 2
-vacuum btree_stats_tbl;
+-- 1st ANALYZE, expect reltuples = 2
+-- Pax does not support VACUUM yet; replace VACUUM with ANALYZE
+ANALYZE btree_stats_tbl;
 SELECT reltuples FROM pg_class WHERE relname='btree_stats_tbl';
  reltuples 
 -----------
@@ -406,8 +441,8 @@ SELECT reltuples FROM pg_class WHERE 
relname='btree_stats_idx';
          2
 (1 row)
 
--- 2nd VACUUM, expect reltuples = 2
-vacuum btree_stats_tbl;
+-- 2nd ANALYZE, expect reltuples = 2
+ANALYZE btree_stats_tbl;
 SELECT reltuples FROM pg_class WHERE relname='btree_stats_tbl';
  reltuples 
 -----------
@@ -443,42 +478,8 @@ SELECT reltuples FROM pg_class WHERE 
relname='btree_stats_idx';
 -- from the 3rd time of consecutively VACUUM after fresh inserts due to above 
skipping index scan
 -- criteria.
 -- 3rd VACUUM, expect reltuples = 2
-vacuum btree_stats_tbl;
-SELECT reltuples FROM pg_class WHERE relname='btree_stats_tbl';
- reltuples 
------------
-         2
-(1 row)
-
+-- VACUUM btree_stats_tbl;
+-- SELECT reltuples FROM pg_class WHERE relname='btree_stats_tbl';
 -- inspect the state of the stats on segments
-SELECT gp_segment_id, relname, reltuples FROM gp_dist_random('pg_class') WHERE 
relname = 'btree_stats_idx';
- gp_segment_id |     relname     | reltuples 
----------------+-----------------+-----------
-             0 | btree_stats_idx |         1
-             1 | btree_stats_idx |         1
-             2 | btree_stats_idx |         0
-(3 rows)
-
-SELECT reltuples FROM pg_class WHERE relname='btree_stats_idx';
- reltuples 
------------
-         2
-(1 row)
-
--- Test unsupported btree opclass parameters
-create index on btree_tall_tbl (id int4_ops(foo=1));
-ERROR:  operator class int4_ops has no options
--- Test case of ALTER INDEX with abuse of column names for indexes.
--- This grammar is not officially supported, but the parser allows it.
-CREATE INDEX btree_tall_idx2 ON btree_tall_tbl (id);
-ALTER INDEX btree_tall_idx2 ALTER COLUMN id SET (n_distinct=100);
-ERROR:  ALTER action ALTER COLUMN ... SET cannot be performed on relation 
"btree_tall_idx2"
-DETAIL:  This operation is not supported for indexes.
-DROP INDEX btree_tall_idx2;
--- Partitioned index
-CREATE TABLE btree_part (id int4) PARTITION BY RANGE (id);
-CREATE INDEX btree_part_idx ON btree_part(id);
-ALTER INDEX btree_part_idx ALTER COLUMN id SET (n_distinct=100);
-ERROR:  ALTER action ALTER COLUMN ... SET cannot be performed on relation 
"btree_part_idx"
-DETAIL:  This operation is not supported for partitioned indexes.
-DROP TABLE btree_part;
+-- SELECT gp_segment_id, relname, reltuples FROM gp_dist_random('pg_class') 
WHERE relname = 'btree_stats_idx';
+-- SELECT reltuples FROM pg_class WHERE relname='btree_stats_idx';
diff --git a/contrib/pax_storage/src/test/regress/expected/gin.out 
b/contrib/pax_storage/src/test/regress/expected/gin.out
index 45cabd06749..4e6c67632aa 100644
--- a/contrib/pax_storage/src/test/regress/expected/gin.out
+++ b/contrib/pax_storage/src/test/regress/expected/gin.out
@@ -4,7 +4,7 @@
 -- There are other tests to test different GIN opclasses. This is for testing
 -- GIN itself.
 -- Create and populate a test table with a GIN index.
-create table gin_test_tbl(i int4[]) with (autovacuum_enabled = off);
+create table gin_test_tbl(i int4[]);
 create index gin_test_idx on gin_test_tbl using gin (i)
   with (fastupdate = on, gin_pending_list_limit = 4096);
 insert into gin_test_tbl select array[1, 2, g] from generate_series(1, 20000) 
g;
@@ -20,12 +20,13 @@ select gin_clean_pending_list('gin_test_idx')>10 as many; 
-- flush the fastupdat
 
 insert into gin_test_tbl select array[3, 1, g] from generate_series(1, 1000) g;
 vacuum gin_test_tbl; -- flush the fastupdate buffers
+-- PAX has not implemented VACUUM yet
 select gin_clean_pending_list('gin_test_idx'); -- nothing to flush
  gin_clean_pending_list 
 ------------------------
-                      0
-                      0
-                      0
+                      1
+                      1
+                      1
 (3 rows)
 
 -- Test vacuuming
diff --git a/contrib/pax_storage/src/test/regress/expected/groupingsets.out 
b/contrib/pax_storage/src/test/regress/expected/groupingsets.out
index 0844b33fd74..32981b1d291 100644
--- a/contrib/pax_storage/src/test/regress/expected/groupingsets.out
+++ b/contrib/pax_storage/src/test/regress/expected/groupingsets.out
@@ -1969,8 +1969,7 @@ select v||'a', case when grouping(v||'a') = 1 then 1 else 
0 end, count(*)
 -- Bug #16784
 create table bug_16784(i int, j int);
 analyze bug_16784;
-alter table bug_16784 set (autovacuum_enabled = 'false');
-WARNING:  autovacuum is not supported in Cloudberry
+-- alter table bug_16784 set (autovacuum_enabled = 'false');
 update pg_class set reltuples = 10 where relname='bug_16784';
 ERROR:  permission denied: "pg_class" is a system catalog
 insert into bug_16784 select g/10, g from generate_series(1,40) g;
@@ -2166,8 +2165,7 @@ create table gs_data_1 as
 select g%1000 as g1000, g%100 as g100, g%10 as g10, g
    from generate_series(0,1999) g;
 analyze gs_data_1;
-alter table gs_data_1 set (autovacuum_enabled = 'false');
-WARNING:  autovacuum is not supported in Cloudberry
+-- alter table gs_data_1 set (autovacuum_enabled = 'false');
 update pg_class set reltuples = 10 where relname='gs_data_1';
 ERROR:  permission denied: "pg_class" is a system catalog
 set work_mem='64kB';
diff --git a/contrib/pax_storage/src/test/regress/expected/rangefuncs.out 
b/contrib/pax_storage/src/test/regress/expected/rangefuncs.out
index 1fa592c4230..2563e254dae 100644
--- a/contrib/pax_storage/src/test/regress/expected/rangefuncs.out
+++ b/contrib/pax_storage/src/test/regress/expected/rangefuncs.out
@@ -1731,17 +1731,15 @@ select * from tt;
 ----+----------
   1 | foo
   2 | bar
-  3 | fool
-  4 | foolfool
-  5 | foolish
-  6 | barrish
   7 | baz
   8 | quux
   9 | foolish
  10 | barrish
- 11 | foolme
- 12 | barme
-(12 rows)
+  4 | foolfool
+  5 | foolish
+  3 | fool
+  6 | barrish
+(10 rows)
 
 -- and rules work
 create temp table tt_log(f1 int, data text);
@@ -2310,7 +2308,7 @@ select x from int8_tbl, extractq2(int8_tbl) f(x);
    ->  Nested Loop
          Output: f.x
          ->  Seq Scan on public.int8_tbl
-               Output: int8_tbl.q1, int8_tbl.q2
+               Output: int8_tbl.q2
          ->  Function Scan on f
                Output: f.x
                Function Call: int8_tbl.q2
diff --git a/contrib/pax_storage/src/test/regress/expected/rowsecurity.out 
b/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
index f83b57e9822..ca13ec0e150 100644
--- a/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
+++ b/contrib/pax_storage/src/test/regress/expected/rowsecurity.out
@@ -5,7 +5,8 @@
 -- Suppress NOTICE messages when users/groups don't exist
 SET client_min_messages TO 'warning';
 SET gp_enable_relsize_collection to on;
-set optimizer_trace_fallback to on;
+-- The Pax sparse filter would invoke f_leak, producing incorrect output
+set pax_enable_sparse_filter to off;
 DROP USER IF EXISTS regress_rls_alice;
 DROP USER IF EXISTS regress_rls_bob;
 DROP USER IF EXISTS regress_rls_carol;
@@ -1991,6 +1992,7 @@ DELETE FROM only t1 WHERE f_leak(b) RETURNING 
tableoid::regclass, *, t1;
 NOTICE:  f_leak => bbbbbb_updt
 ERROR:  not implemented yet on pax relations: TupleFetchRowVersion  (seg0 
slice1 127.0.1.1:7002 pid=1173366)
 DELETE FROM t1 WHERE f_leak(b) RETURNING tableoid::regclass, *, t1;
+NOTICE:  f_leak => bcdbcd
 NOTICE:  f_leak => bbbbbb_updt
 NOTICE:  f_leak => defdef
 ERROR:  not implemented yet on pax relations: TupleFetchRowVersion  (seg0 
slice1 127.0.1.1:7002 pid=1173366)
@@ -3156,28 +3158,11 @@ NOTICE:  f_leak => fgh_updt
 NOTICE:  f_leak => fgh_updt
  a |       b       |         c         
 ---+---------------+-------------------
- 3 | cde_updt      | regress_rls_carol
- 7 | fgh_updt      | regress_rls_carol
- 2 | bcd_updt_updt | regress_rls_bob
- 4 | def_updt_updt | regress_rls_carol
  6 | fgh_updt_updt | regress_rls_bob
- 8 | fgh_updt_updt | regress_rls_carol
-(6 rows)
-
-DELETE FROM x1 WHERE f_leak(b) RETURNING *;
-NOTICE:  f_leak => cde_updt
-NOTICE:  f_leak => fgh_updt
-NOTICE:  f_leak => bcd_updt_updt
-NOTICE:  f_leak => def_updt_updt
-NOTICE:  f_leak => fgh_updt_updt
-NOTICE:  f_leak => fgh_updt_updt
- a |       b       |         c         
----+---------------+-------------------
  3 | cde_updt      | regress_rls_carol
  7 | fgh_updt      | regress_rls_carol
  2 | bcd_updt_updt | regress_rls_bob
  4 | def_updt_updt | regress_rls_carol
- 6 | fgh_updt_updt | regress_rls_bob
  8 | fgh_updt_updt | regress_rls_carol
 (6 rows)
 
diff --git a/contrib/pax_storage/src/test/regress/expected/tablesample.out 
b/contrib/pax_storage/src/test/regress/expected/tablesample.out
index d905ed6b3f9..f9f6dc9800c 100644
--- a/contrib/pax_storage/src/test/regress/expected/tablesample.out
+++ b/contrib/pax_storage/src/test/regress/expected/tablesample.out
@@ -16,23 +16,22 @@ SELECT gp_segment_id, count(dist) FROM test_tablesample 
GROUP BY 1 ORDER BY 1;
 SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (0);
  id 
 ----
-  3
+  1
+  2
   4
-  5
-  6
-  7
   8
- 21
- 22
- 24
- 28
- 29
+  9
  11
  12
  14
  18
  19
-(16 rows)
+ 21
+ 22
+ 24
+ 28
+ 29
+(15 rows)
 
 SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (0);
  id 
@@ -42,23 +41,22 @@ SELECT id FROM test_tablesample TABLESAMPLE SYSTEM 
(100.0/11) REPEATABLE (0);
 SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0);
  id 
 ----
- 11
- 12
- 14
- 18
- 19
-  3
+  1
+  2
   4
-  5
-  6
-  7
   8
+  9
  21
  22
  24
  28
  29
-(16 rows)
+ 11
+ 12
+ 14
+ 18
+ 19
+(15 rows)
 
 SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0);
  id 
@@ -83,10 +81,7 @@ SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) 
REPEATABLE (0);
 SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (0);
  id 
 ----
- 18
-  7
- 28
-(3 rows)
+(0 rows)
 
 -- 100% should give repeatable count results (ie, all rows) in any case
 SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100);
@@ -131,63 +126,62 @@ View definition:
 
 -- check a sampled query doesn't affect cursor in progress
 BEGIN;
-DECLARE tablesample_cur SCROLL CURSOR FOR
+DECLARE tablesample_cur CURSOR FOR
   SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0) ORDER 
BY id;
 FETCH FIRST FROM tablesample_cur;
  id 
 ----
-  3
+  1
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  4
+  2
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  5
+  4
 (1 row)
 
 SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0);
  id 
 ----
- 11
- 12
- 14
- 18
- 19
-  3
+  1
+  2
   4
-  5
-  6
-  7
   8
+  9
  21
  22
  24
  28
  29
-(16 rows)
+ 11
+ 12
+ 14
+ 18
+ 19
+(15 rows)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  6
+  8
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  7
+  9
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  8
+ 11
 (1 row)
 
 -- Cloudberry: Going backwards on cursors is not supported. By closing the
@@ -198,37 +192,37 @@ DECLARE tablesample_cur CURSOR FOR SELECT id FROM 
test_tablesample TABLESAMPLE S
 FETCH FIRST FROM tablesample_cur;
  id 
 ----
-  3
+  1
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  4
+  2
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  5
+  4
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  6
+  8
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  7
+  9
 (1 row)
 
 FETCH NEXT FROM tablesample_cur;
  id 
 ----
-  8
+ 11
 (1 row)
 
 CLOSE tablesample_cur;
@@ -316,11 +310,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM ttr1 TABLESAMPLE 
BERNOULLI (50) REPEATABLE (1)
 (8 rows)
 
 SELECT * FROM ttr1 TABLESAMPLE BERNOULLI (50) REPEATABLE (1), ttr2 TABLESAMPLE 
BERNOULLI (50) REPEATABLE (1) WHERE ttr1.a = ttr2.a;
- a  | b | a  | b 
-----+---+----+---
- 12 | 1 | 12 | 2
- 31 | 1 | 31 | 2
-(2 rows)
+ a | b | a | b 
+---+---+---+---
+ 1 | 1 | 1 | 2
+(1 row)
 
 EXPLAIN (COSTS OFF) SELECT * FROM ttr1 TABLESAMPLE SYSTEM (50) REPEATABLE (1), 
ttr2 TABLESAMPLE SYSTEM (50) REPEATABLE (1) WHERE ttr1.a = ttr2.a;
                                    QUERY PLAN                                  
 
@@ -336,12 +329,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM ttr1 TABLESAMPLE SYSTEM 
(50) REPEATABLE (1), t
 (8 rows)
 
 SELECT * FROM ttr1 TABLESAMPLE SYSTEM (50) REPEATABLE (1), ttr2 TABLESAMPLE 
SYSTEM (50) REPEATABLE (1) WHERE ttr1.a = ttr2.a;
- a  | b | a  | b 
-----+---+----+---
-  1 | 1 |  1 | 2
- 12 | 1 | 12 | 2
- 31 | 1 | 31 | 2
-(3 rows)
+ a | b | a | b 
+---+---+---+---
+ 1 | 1 | 1 | 2
+(1 row)
 
 RESET enable_hashjoin;
 RESET enable_mergejoin;
@@ -352,35 +343,34 @@ RESET enable_nestloop;
 select * from
   (values (0),(100)) v(pct),
   lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss;
- pct | count 
------+-------
-   0 |     0
- 100 | 10000
-(2 rows)
-
+ERROR:  illegal rescan of motion node: invalid plan (nodeMotion.c:1367)
+HINT:  Likely caused by bad NL-join, try setting enable_nestloop to off
 select * from
   (values (0),(100)) v(pct),
   lateral (select count(*) from tenk1 tablesample system (pct)) ss;
- pct | count 
------+-------
-   0 |     0
- 100 | 10000
-(2 rows)
-
+ERROR:  illegal rescan of motion node: invalid plan (nodeMotion.c:1367)
+HINT:  Likely caused by bad NL-join, try setting enable_nestloop to off
 explain (costs off)
 select pct, count(unique1) from
   (values (0),(100)) v(pct),
   lateral (select * from tenk1 tablesample bernoulli (pct)) ss
   group by pct;
-                       QUERY PLAN                       
---------------------------------------------------------
- HashAggregate
+                                QUERY PLAN                                
+--------------------------------------------------------------------------
+ Finalize GroupAggregate
    Group Key: "*VALUES*".column1
-   ->  Nested Loop
-         ->  Values Scan on "*VALUES*"
-         ->  Sample Scan on tenk1
-               Sampling: bernoulli ("*VALUES*".column1)
-(6 rows)
+   ->  Gather Motion 3:1  (slice1; segments: 3)
+         Merge Key: "*VALUES*".column1
+         ->  Partial GroupAggregate
+               Group Key: "*VALUES*".column1
+               ->  Sort
+                     Sort Key: "*VALUES*".column1
+                     ->  Nested Loop
+                           ->  Values Scan on "*VALUES*"
+                           ->  Sample Scan on tenk1
+                                 Sampling: bernoulli ("*VALUES*".column1)
+ Optimizer: Postgres query optimizer
+(13 rows)
 
 select pct, count(unique1) from
   (values (0),(100)) v(pct),
diff --git a/contrib/pax_storage/src/test/regress/expected/tsearch.out 
b/contrib/pax_storage/src/test/regress/expected/tsearch.out
index aa44021e4e4..eaa43a0ebf2 100644
--- a/contrib/pax_storage/src/test/regress/expected/tsearch.out
+++ b/contrib/pax_storage/src/test/regress/expected/tsearch.out
@@ -212,17 +212,18 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
 (1 row)
 
 create index wowidx on test_tsvector using gist (a);
+ERROR:  pax only support btree/hash/gin/bitmap indexes 
(pax_access_handle.cc:591)
 SET enable_seqscan=OFF;
 SET enable_indexscan=ON;
 SET enable_bitmapscan=OFF;
 explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
-                            QUERY PLAN                             
--------------------------------------------------------------------
+                          QUERY PLAN                           
+---------------------------------------------------------------
  Finalize Aggregate
    ->  Gather Motion 3:1  (slice1; segments: 3)
          ->  Partial Aggregate
-               ->  Index Scan using wowidx on test_tsvector
-                     Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
+               ->  Seq Scan on test_tsvector
+                     Filter: (a @@ '''wr'' | ''qh'''::tsquery)
  Optimizer: Postgres query optimizer
 (6 rows)
 
@@ -373,17 +374,15 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
 SET enable_indexscan=OFF;
 SET enable_bitmapscan=ON;
 explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
-                               QUERY PLAN                                
--------------------------------------------------------------------------
+                          QUERY PLAN                           
+---------------------------------------------------------------
  Finalize Aggregate
    ->  Gather Motion 3:1  (slice1; segments: 3)
          ->  Partial Aggregate
-               ->  Bitmap Heap Scan on test_tsvector
-                     Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-                     ->  Bitmap Index Scan on wowidx
-                           Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
+               ->  Seq Scan on test_tsvector
+                     Filter: (a @@ '''wr'' | ''qh'''::tsquery)
  Optimizer: Postgres query optimizer
-(8 rows)
+(6 rows)
 
 SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
  count 
@@ -543,29 +542,26 @@ ERROR:  unrecognized parameter "foo"
 CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, 
siglen = 200));
 ERROR:  parameter "siglen" specified more than once
 CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1));
+ERROR:  pax only support btree/hash/gin/bitmap indexes 
(pax_access_handle.cc:591)
 \d test_tsvector
             Table "public.test_tsvector"
  Column |   Type   | Collation | Nullable | Default 
 --------+----------+-----------+----------+---------
  t      | text     |           |          | 
  a      | tsvector |           |          | 
-Indexes:
-    "wowidx" gist (a)
-    "wowidx2" gist (a tsvector_ops (siglen='1'))
+Distributed by: (t)
 
-DROP INDEX wowidx;
+-- DROP INDEX wowidx;
 EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
-                               QUERY PLAN                                
--------------------------------------------------------------------------
+                          QUERY PLAN                           
+---------------------------------------------------------------
  Finalize Aggregate
    ->  Gather Motion 3:1  (slice1; segments: 3)
          ->  Partial Aggregate
-               ->  Bitmap Heap Scan on test_tsvector
-                     Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-                     ->  Bitmap Index Scan on wowidx2
-                           Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
+               ->  Seq Scan on test_tsvector
+                     Filter: (a @@ '''wr'' | ''qh'''::tsquery)
  Optimizer: Postgres query optimizer
-(8 rows)
+(6 rows)
 
 SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
  count 
@@ -711,8 +707,9 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
    450
 (1 row)
 
-DROP INDEX wowidx2;
+-- DROP INDEX wowidx2;
 CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484));
+ERROR:  pax only support btree/hash/gin/bitmap indexes 
(pax_access_handle.cc:591)
 \d test_tsvector
             Table "public.test_tsvector"
  Column |   Type   | Collation | Nullable | Default 
@@ -728,10 +725,8 @@ EXPLAIN (costs off) SELECT count(*) FROM test_tsvector 
WHERE a @@ 'wr|qh';
  Finalize Aggregate
    ->  Gather Motion 3:1  (slice1; segments: 3)
          ->  Partial Aggregate
-               ->  Bitmap Heap Scan on test_tsvector
-                     Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-                     ->  Bitmap Index Scan on wowidx
-                           Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
+               ->  Seq Scan on test_tsvector
+                     Filter: (a @@ '''wr'' | ''qh'''::tsquery)
  Optimizer: Postgres query optimizer
 (8 rows)
 
@@ -882,7 +877,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
 RESET enable_seqscan;
 RESET enable_indexscan;
 RESET enable_bitmapscan;
-DROP INDEX wowidx;
+-- DROP INDEX wowidx;
 CREATE INDEX wowidx ON test_tsvector USING gin (a);
 SET enable_seqscan=OFF;
 -- GIN only supports bitmapscan, so no need to test plain indexscan
@@ -1107,7 +1102,6 @@ SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 
'AB') ORDER BY ndoc DESC, n
  DFG  |    1 |      2
 (1 row)
 
-DROP INDEX wowidx;
 --dictionaries and to_tsvector
 SELECT ts_lexize('english_stem', 'skies');
  ts_lexize 
@@ -2385,6 +2379,7 @@ SELECT ts_rewrite( query, 'SELECT keyword, sample FROM 
test_tsquery' ) FROM to_t
 (1 row)
 
 CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops);
+ERROR:  pax only support btree/hash/gin/bitmap indexes 
(pax_access_handle.cc:591)
 SET enable_seqscan=OFF;
 SELECT keyword FROM test_tsquery WHERE keyword @> 'new';
      keyword      
@@ -2513,6 +2508,7 @@ alter table test_tsvector set distributed by (distkey);
 CREATE TRIGGER tsvectorupdate
 BEFORE UPDATE OR INSERT ON test_tsvector
 FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 
'pg_catalog.english', t);
+ERROR:  ON UPDATE triggers are not supported on append-only tables
 SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
  count 
 -------
@@ -2523,7 +2519,7 @@ INSERT INTO test_tsvector (t) VALUES ('345 qwerty');
 SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
  count 
 -------
-     1
+     0
 (1 row)
 
 UPDATE test_tsvector SET t = null WHERE t = '345 qwerty';
@@ -2537,7 +2533,7 @@ INSERT INTO test_tsvector (t) VALUES ('345 qwerty');
 SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
  count 
 -------
-     1
+     0
 (1 row)
 
 -- Test inlining of immutable constant functions
diff --git a/contrib/pax_storage/src/test/regress/expected/update.out 
b/contrib/pax_storage/src/test/regress/expected/update.out
index 0e50a41e441..6e841db2fb6 100644
--- a/contrib/pax_storage/src/test/regress/expected/update.out
+++ b/contrib/pax_storage/src/test/regress/expected/update.out
@@ -970,6 +970,7 @@ SELECT tableoid::regclass::text, * FROM list_parted ORDER 
BY 1, 2, 3, 4;
 ------------+---+----+----
  list_part1 | 2 | 52 | 50
  list_part1 | 3 |  6 | 60
+ sub_part1  | 1 |  1 | 70
  sub_part2  | 1 |  2 | 10
 (4 rows)
 
diff --git a/contrib/pax_storage/src/test/regress/expected/update_gp.out 
b/contrib/pax_storage/src/test/regress/expected/update_gp.out
index 2dc6986a891..a30c482a0f1 100644
--- a/contrib/pax_storage/src/test/regress/expected/update_gp.out
+++ b/contrib/pax_storage/src/test/regress/expected/update_gp.out
@@ -104,41 +104,36 @@ EXPLAIN (COSTS OFF) UPDATE keo1 SET 
user_vie_act_cntr_marg_cum = 234.682 FROM
                 (SELECT min (keo4.keo_para_budget_date) FROM keo4)))
     ) t1
 WHERE t1.user_vie_project_code_pk = keo1.user_vie_project_code_pk;
-                                                 QUERY PLAN                    
                              
--------------------------------------------------------------------------------------------------------------
+                                              QUERY PLAN                       
                        
+-------------------------------------------------------------------------------------------------------
  Update on keo1
-   InitPlan 3 (returns $2)  (slice5)
+   InitPlan 3 (returns $2)  (slice3)
      ->  Aggregate
-           InitPlan 2 (returns $1)  (slice7)
-             ->  Gather Motion 3:1  (slice8; segments: 3)
-                   InitPlan 1 (returns $0)  (slice9)
+           InitPlan 2 (returns $1)  (slice5)
+             ->  Gather Motion 3:1  (slice6; segments: 3)
+                   InitPlan 1 (returns $0)  (slice7)
                      ->  Aggregate
-                           ->  Gather Motion 3:1  (slice10; segments: 3)
+                           ->  Gather Motion 3:1  (slice8; segments: 3)
                                  ->  Seq Scan on keo4
                    ->  Seq Scan on keo4 keo4_1
                          Filter: ((keo_para_budget_date)::text = $0)
-           ->  Gather Motion 3:1  (slice6; segments: 3)
+           ->  Gather Motion 3:1  (slice4; segments: 3)
                  ->  Seq Scan on keo3
                        Filter: ((bky_per)::text = ($1)::text)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)
+   ->  Hash Join
+         Hash Cond: ((a.user_vie_project_code_pk)::text = 
(b.projects_pk)::text)
          ->  Hash Join
-               Hash Cond: ((a.user_vie_project_code_pk)::text = 
(b.projects_pk)::text)
-               ->  Hash Join
-                     Hash Cond: ((a.user_vie_project_code_pk)::text = 
(keo1.user_vie_project_code_pk)::text)
-                     ->  Redistribute Motion 3:3  (slice2; segments: 3)
-                           Hash Key: a.user_vie_project_code_pk
-                           ->  Seq Scan on keo1 a
-                                 Filter: 
((user_vie_fiscal_year_period_sk)::text = $2)
-                     ->  Hash
-                           ->  Redistribute Motion 3:3  (slice3; segments: 3)
-                                 Hash Key: keo1.user_vie_project_code_pk
-                                 ->  Seq Scan on keo1
+               Hash Cond: ((a.user_vie_project_code_pk)::text = 
(keo1.user_vie_project_code_pk)::text)
+               ->  Broadcast Motion 3:3  (slice1; segments: 3)
+                     ->  Seq Scan on keo1 a
+                           Filter: ((user_vie_fiscal_year_period_sk)::text = 
$2)
                ->  Hash
-                     ->  Redistribute Motion 3:3  (slice4; segments: 3)
-                           Hash Key: b.projects_pk
-                           ->  Seq Scan on keo2 b
+                     ->  Seq Scan on keo1
+         ->  Hash
+               ->  Broadcast Motion 3:3  (slice2; segments: 3)
+                     ->  Seq Scan on keo2 b
  Optimizer: Postgres query optimizer
-(32 rows)
+(27 rows)
 
 UPDATE keo1 SET user_vie_act_cntr_marg_cum = 234.682 FROM
     ( SELECT a.user_vie_project_code_pk FROM keo1 a INNER JOIN keo2 b
@@ -691,16 +686,16 @@ analyze tsplit_entry;
 explain update tsplit_entry set c = s.a from (select count(*) as a from 
gp_segment_configuration) s;
                                                     QUERY PLAN                 
                                   
 
------------------------------------------------------------------------------------------------------------------
- Update on tsplit_entry  (cost=10000000001.00..10000000003.18 rows=3 width=54)
-   ->  Explicit Redistribute Motion 1:3  (slice)  
(cost=10000000001.00..10000000003.18 rows=7 width=54)
-         ->  Split  (cost=10000000001.00..10000000003.18 rows=7 width=54)
-               ->  Nested Loop  (cost=10000000001.00..10000000003.12 rows=4 
width=54)
-                     ->  Gather Motion 3:1  (slice2; segments: 3)  
(cost=0.00..2.06 rows=2 width=14)
-                           ->  Seq Scan on tsplit_entry  (cost=0.00..2.02 
rows=1 width=14)
-                     ->  Materialize  (cost=1.00..1.03 rows=1 width=40)
-                           ->  Subquery Scan on s  (cost=1.00..1.02 rows=1 
width=40)
-                                 ->  Aggregate  (cost=1.00..1.01 rows=1 
width=8)
-                                       ->  Function Scan on 
gp_get_segment_configuration  (cost=0.00..1.00 rows=1 width=0)
+ Update on tsplit_entry  (cost=10000000001.10..10000000002.28 rows=0 width=0)
+   ->  Explicit Redistribute Motion 1:3  (slice1)  
(cost=10000000001.10..10000000002.28 rows=2 width=74)
+         ->  Split Update  (cost=10000000001.10..10000000002.20 rows=7 
width=74)
+               ->  Nested Loop  (cost=10000000001.10..10000000002.20 rows=3 
width=74)
+                     ->  Gather Motion 3:1  (slice2; segments: 3)  
(cost=0.00..1.04 rows=2 width=38)
+                           ->  Seq Scan on tsplit_entry  (cost=0.00..1.01 
rows=1 width=38)
+                     ->  Materialize  (cost=1.10..1.12 rows=1 width=40)
+                           ->  Subquery Scan on s  (cost=1.10..1.12 rows=1 
width=40)
+                                 ->  Aggregate  (cost=1.10..1.11 rows=1 
width=8)
+                                       ->  Seq Scan on 
gp_segment_configuration  (cost=0.00..1.08 rows=8 width=0)
  Optimizer: Postgres query optimizer
 (11 rows)
 -- end_ignore
@@ -757,6 +752,7 @@ create table t_insert_on_conflict_update_distkey(a int, b 
int) distributed repli
 create unique index uidx_t_insert_on_conflict_update_distkey on 
t_insert_on_conflict_update_distkey(a, b);
 -- the following statement should succeed because replicated table does not 
contain distkey
 insert into t_insert_on_conflict_update_distkey values (1, 1) on conflict(a, 
b) do update set a = 1;
+ERROR:  not implemented yet on pax relations: TupleInsertSpeculative
 -- Some tests on a partitioned table.
 CREATE TABLE update_gp_rangep (a int, b int, orig_a int) DISTRIBUTED BY (b) 
PARTITION BY RANGE (a);
 CREATE TABLE update_gp_rangep_1_to_10  PARTITION OF update_gp_rangep FOR 
VALUES FROM  (1) TO (10);
diff --git a/contrib/pax_storage/src/test/regress/sql/privileges.sql 
b/contrib/pax_storage/src/test/regress/sql/privileges.sql
index fd97df969af..de4d2acbac9 100644
--- a/contrib/pax_storage/src/test/regress/sql/privileges.sql
+++ b/contrib/pax_storage/src/test/regress/sql/privileges.sql
@@ -326,7 +326,6 @@ CREATE TABLE atest12 as
 CREATE INDEX ON atest12 (a);
 CREATE INDEX ON atest12 (abs(a));
 -- results below depend on having quite accurate stats for atest12, so...
-ALTER TABLE atest12 SET (autovacuum_enabled = off);
 SET default_statistics_target = 10000;
 VACUUM ANALYZE atest12;
 RESET default_statistics_target;
diff --git a/contrib/pax_storage/src/test/regress/sql/tsearch.sql 
b/contrib/pax_storage/src/test/regress/sql/tsearch.sql
index d1bed291453..6e81b043fd8 100644
--- a/contrib/pax_storage/src/test/regress/sql/tsearch.sql
+++ b/contrib/pax_storage/src/test/regress/sql/tsearch.sql
@@ -270,6 +270,7 @@ INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 
FGH');
 SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, 
nentry DESC, word LIMIT 10;
 SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, 
nentry DESC, word;
 
+DROP INDEX wowidx;
 --dictionaries and to_tsvector
 
 SELECT ts_lexize('english_stem', 'skies');


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to