This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new 8dc719e3331 Fix some answer files
8dc719e3331 is described below

commit 8dc719e3331b0d1ef7d16b18e850fe122e54397d
Author: Jinbao Chen <[email protected]>
AuthorDate: Sat Jan 3 22:15:39 2026 +0800

    Fix some answer files
---
 src/test/regress/expected/alter_table_gp.out      |  2 +-
 src/test/regress/expected/bfv_cte.out             |  4 ++
 src/test/regress/expected/bfv_joins.out           |  1 +
 src/test/regress/expected/bfv_partition_plans.out | 38 +++++------
 src/test/regress/expected/catcache.out            | 10 +--
 src/test/regress/expected/gp_recursive_cte.out    |  4 +-
 src/test/regress/expected/oid_consistency.out     |  4 +-
 src/test/regress/expected/partition_pruning.out   | 17 ++---
 src/test/regress/expected/qp_join_universal.out   | 78 +++++++++++------------
 src/test/regress/expected/qp_rowsecurity.out      |  4 +-
 10 files changed, 82 insertions(+), 80 deletions(-)

diff --git a/src/test/regress/expected/alter_table_gp.out b/src/test/regress/expected/alter_table_gp.out
index 371d72f5a55..ffeb3b71be4 100644
--- a/src/test/regress/expected/alter_table_gp.out
+++ b/src/test/regress/expected/alter_table_gp.out
@@ -266,7 +266,7 @@ ANALYZE gp_test_fast_def;
 SELECT COUNT (DISTINCT ts) FROM gp_test_fast_def;
  count 
 -------
-     1
+     3
 (1 row)
 
 -- Create view with JOIN clause, drop column, check select to view not causing segfault
diff --git a/src/test/regress/expected/bfv_cte.out b/src/test/regress/expected/bfv_cte.out
index eb1592affd7..2d2dc5ccb20 100644
--- a/src/test/regress/expected/bfv_cte.out
+++ b/src/test/regress/expected/bfv_cte.out
@@ -492,6 +492,9 @@ select wait_until_query_output_to_file('/tmp/bfv_cte.out');
 -- end_matchsubs
 -- Filter out irrelevant LOG messages from segments other than seg2.
 \! cat /tmp/bfv_cte.out | grep -P '^(?!LOG)|^(LOG.*seg2)' | grep -vP 'LOG.*fault|decreased xslice state refcount'
+SET
+SET
+SET
 LOG:  SISC (shareid=0, slice=2): initialized xslice state  (seg2 slice2 127.0.1.1:7004 pid=1049102)
 LOG:  SISC READER (shareid=0, slice=2): wrote notify_done  (seg2 slice2 127.0.1.1:7004 pid=1049102)
 LOG:  SISC WRITER (shareid=0, slice=4): initializing because squelched  (seg2 slice4 127.0.1.1:7004 pid=1049114)
@@ -499,6 +502,7 @@ LOG:  SISC WRITER (shareid=0, slice=4): No tuplestore yet, creating tuplestore
 LOG:  SISC WRITER (shareid=0, slice=4): wrote notify_ready  (seg2 slice4 127.0.1.1:7004 pid=1049114)
 LOG:  SISC WRITER (shareid=0, slice=4): got DONE message from 1 readers  (seg2 slice4 127.0.1.1:7004 pid=1049114)
 LOG:  SISC (shareid=0, slice=4): removed xslice state  (seg2 slice4 127.0.1.1:7004 pid=1049114)
+SET
  a | a 
 ---+---
  1 | 2
diff --git a/src/test/regress/expected/bfv_joins.out b/src/test/regress/expected/bfv_joins.out
index 3eab4b55fc1..31515680256 100644
--- a/src/test/regress/expected/bfv_joins.out
+++ b/src/test/regress/expected/bfv_joins.out
@@ -3511,6 +3511,7 @@ explain (costs off) select * from b, lateral (select * from a, c where b.i = a.i
 ---------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
    ->  Nested Loop
+         Join Filter: ((a.i + b.i) = c.j)
          ->  Broadcast Motion 3:3  (slice2; segments: 3)
                ->  Seq Scan on b
          ->  Materialize
diff --git a/src/test/regress/expected/bfv_partition_plans.out b/src/test/regress/expected/bfv_partition_plans.out
index 9b01daeea10..386491ea236 100644
--- a/src/test/regress/expected/bfv_partition_plans.out
+++ b/src/test/regress/expected/bfv_partition_plans.out
@@ -865,28 +865,28 @@ EVERY ('2 mons'::interval)
 NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'oid' as the Apache Cloudberry data distribution key for this table.
 HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
 explain analyze select a.* from mpp8031 a, mpp8031 b where a.oid = b.oid;
-                                                              QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=2743.00..2475974.00 rows=80883360 width=16) (actual time=0.807..0.807 rows=0 loops=1)
-   ->  Hash Join  (cost=2743.00..1397529.20 rows=26961120 width=16) (never executed)
+                                                                QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=2743.00..2475974.00 rows=80883360 width=16) (actual time=0.707..0.709 rows=0 loops=1)
+   ->  Hash Join  (cost=2743.00..1397529.20 rows=26961120 width=16) (actual time=0.024..0.028 rows=0 loops=1)
          Hash Cond: (a.oid = b.oid)
-         ->  Append  (cost=0.00..1558.00 rows=94800 width=16) (never executed)
-               ->  Seq Scan on mpp8031_1_prt_foo_1 a_1  (cost=0.00..271.00 rows=23700 width=16) (never executed)
-               ->  Seq Scan on mpp8031_1_prt_2 a_2  (cost=0.00..271.00 rows=23700 width=16) (never executed)
-               ->  Seq Scan on mpp8031_1_prt_3 a_3  (cost=0.00..271.00 rows=23700 width=16) (never executed)
-               ->  Seq Scan on mpp8031_1_prt_4 a_4  (cost=0.00..271.00 rows=23700 width=16) (never executed)
+         ->  Append  (cost=0.00..1558.00 rows=94800 width=16) (actual time=0.021..0.023 rows=0 loops=1)
+               ->  Seq Scan on mpp8031_1_prt_foo_1 a_1  (cost=0.00..271.00 rows=23700 width=16) (actual time=0.010..0.010 rows=0 loops=1)
+               ->  Seq Scan on mpp8031_1_prt_2 a_2  (cost=0.00..271.00 rows=23700 width=16) (actual time=0.003..0.004 rows=0 loops=1)
+               ->  Seq Scan on mpp8031_1_prt_3 a_3  (cost=0.00..271.00 rows=23700 width=16) (actual time=0.003..0.003 rows=0 loops=1)
+               ->  Seq Scan on mpp8031_1_prt_4 a_4  (cost=0.00..271.00 rows=23700 width=16) (actual time=0.003..0.003 rows=0 loops=1)
          ->  Hash  (cost=1558.00..1558.00 rows=94800 width=4) (never executed)
                ->  Append  (cost=0.00..1558.00 rows=94800 width=4) (never executed)
                      ->  Seq Scan on mpp8031_1_prt_foo_1 b_1  (cost=0.00..271.00 rows=23700 width=4) (never executed)
                      ->  Seq Scan on mpp8031_1_prt_2 b_2  (cost=0.00..271.00 rows=23700 width=4) (never executed)
                      ->  Seq Scan on mpp8031_1_prt_3 b_3  (cost=0.00..271.00 rows=23700 width=4) (never executed)
                      ->  Seq Scan on mpp8031_1_prt_4 b_4  (cost=0.00..271.00 rows=23700 width=4) (never executed)
- Planning Time: 2.439 ms
-   (slice0)    Executor memory: 51K bytes.
-   (slice1)    Executor memory: 57K bytes avg x 3 workers, 57K bytes max (seg0).
+ Planning Time: 2.080 ms
+   (slice0)    Executor memory: 122K bytes.
+   (slice1)    Executor memory: 121K bytes avg x 3x(0) workers, 121K bytes max (seg0).
  Memory used:  128000kB
  Optimizer: Postgres query optimizer
- Execution Time: 1.481 ms
+ Execution Time: 1.279 ms
 (20 rows)
 
 drop table mpp8031;
@@ -1236,11 +1236,11 @@ EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF, ANALYZE) DELETE FROM delete_from_pt
    ->  Hash Semi Join (actual rows=1 loops=1)
          Hash Cond: (delete_from_pt.b = t.a)
          Extra Text: (seg0)   Hash chain length 2.0 avg, 2 max, using 1 of 131072 buckets.
-         ->  Append (actual rows=3 loops=1)
+         ->  Append (actual rows=5 loops=1)
                Partition Selectors: $1
                ->  Seq Scan on delete_from_pt_1_prt_1 delete_from_pt_2 (actual rows=3 loops=1)
-               ->  Seq Scan on delete_from_pt_1_prt_2 delete_from_pt_3 (never executed)
-               ->  Seq Scan on delete_from_pt_1_prt_3 delete_from_pt_4 (never executed)
+               ->  Seq Scan on delete_from_pt_1_prt_2 delete_from_pt_3 (actual rows=3 loops=1)
+               ->  Seq Scan on delete_from_pt_1_prt_3 delete_from_pt_4 (actual rows=0 loops=1)
          ->  Hash (actual rows=2 loops=1)
                Buckets: 131072  Batches: 1  Memory Usage: 1025kB
                ->  Partition Selector (selector id: $1) (actual rows=2 loops=1)
@@ -1248,11 +1248,11 @@ EXPLAIN (COSTS OFF, TIMING OFF, SUMMARY OFF, ANALYZE) DELETE FROM delete_from_pt
                            ->  Hash Join (actual rows=1 loops=1)
                                  Hash Cond: (delete_from_pt_1.b = t.a)
                                   Extra Text: (seg0)   Hash chain length 1.0 avg, 1 max, using 1 of 262144 buckets.
-                                 ->  Append (actual rows=3 loops=1)
+                                 ->  Append (actual rows=5 loops=1)
                                        Partition Selectors: $2
                                        ->  Seq Scan on delete_from_pt_1_prt_1 delete_from_pt_5 (actual rows=3 loops=1)
-                                       ->  Seq Scan on delete_from_pt_1_prt_2 delete_from_pt_6 (never executed)
-                                       ->  Seq Scan on delete_from_pt_1_prt_3 delete_from_pt_7 (never executed)
+                                       ->  Seq Scan on delete_from_pt_1_prt_2 delete_from_pt_6 (actual rows=3 loops=1)
+                                       ->  Seq Scan on delete_from_pt_1_prt_3 delete_from_pt_7 (actual rows=0 loops=1)
                                  ->  Hash (actual rows=1 loops=1)
                                        Buckets: 262144  Batches: 1  Memory Usage: 2049kB
                                        ->  Partition Selector (selector id: $2) (actual rows=1 loops=1)
diff --git a/src/test/regress/expected/catcache.out b/src/test/regress/expected/catcache.out
index 79c41c8fa61..dd587fa8c42 100644
--- a/src/test/regress/expected/catcache.out
+++ b/src/test/regress/expected/catcache.out
@@ -41,11 +41,11 @@ explain update dml_14027_union_s set a = (select null union select null)::numeri
                  ->  Append  (cost=0.00..0.05 rows=2 width=32)
                        ->  Result  (cost=0.00..0.01 rows=1 width=32)
                        ->  Result  (cost=0.00..0.01 rows=1 width=32)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..2134.67 rows=66133 width=50)
-         ->  Split  (cost=0.00..812.00 rows=66133 width=50)
-               ->  Append  (cost=0.00..812.00 rows=33067 width=50)
-                     ->  Seq Scan on dml_14027_union_s_1_prt_2 dml_14027_union_s_1  (cost=0.00..323.33 rows=16533 width=50)
-                     ->  Seq Scan on dml_14027_union_s_1_prt_def dml_14027_union_s_2  (cost=0.00..323.33 rows=16533 width=50)
+   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..2.12 rows=4 width=36)
+         ->  Split Update  (cost=0.00..2.04 rows=4 width=36)
+               ->  Append  (cost=0.00..2.04 rows=2 width=36)
+                     ->  Seq Scan on dml_14027_union_s_1_prt_2 dml_14027_union_s_1  (cost=0.00..1.02 rows=1 width=36)
+                     ->  Seq Scan on dml_14027_union_s_1_prt_def dml_14027_union_s_2  (cost=0.00..1.02 rows=1 width=36)
  Optimizer: Postgres query optimizer
 (17 rows)
 
diff --git a/src/test/regress/expected/gp_recursive_cte.out b/src/test/regress/expected/gp_recursive_cte.out
index ecf5c26e03a..4c7c325a42b 100644
--- a/src/test/regress/expected/gp_recursive_cte.out
+++ b/src/test/regress/expected/gp_recursive_cte.out
@@ -594,8 +594,8 @@ with recursive the_cte_here(n) as (
 select * from the_cte_here;
                                           QUERY PLAN
----------------------------------------------------------------------------------------------
- Recursive Union  (cost=1.02..15.27 rows=34 width=4)
-   ->  Gather Motion 1:1  (slice1; segments: 1)  (cost=1.02..1.02 rows=1 width=4)
+ Recursive Union  (cost=1.01..14.93 rows=34 width=4)
+   ->  Gather Motion 1:1  (slice1; segments: 1)  (cost=1.01..1.01 rows=1 width=4)
          ->  Seq Scan on t_rep_test_rcte  (cost=0.00..1.01 rows=1 width=4)
    ->  Hash Join  (cost=1.09..1.36 rows=3 width=4)
          Hash Cond: (the_cte_here.n = t_rand_test_rcte.c)
diff --git a/src/test/regress/expected/oid_consistency.out b/src/test/regress/expected/oid_consistency.out
index 9a15834650a..1672c686b3b 100644
--- a/src/test/regress/expected/oid_consistency.out
+++ b/src/test/regress/expected/oid_consistency.out
@@ -762,10 +762,12 @@ CREATE table oid_consistency_tt1 (a int) distributed by (a);
 CREATE table oid_consistency_tt2 (a int) distributed by (a);
 CREATE rule "_RETURN" as on select to oid_consistency_tt1
         do instead select * from oid_consistency_tt2;
+ERROR:  relation "oid_consistency_tt1" cannot have ON SELECT rules
+DETAIL:  This operation is not supported for tables.
 select verify('oid_consistency_tt1');
  verify 
 --------
-      1
+      0
 (1 row)
 
 --
diff --git a/src/test/regress/expected/partition_pruning.out b/src/test/regress/expected/partition_pruning.out
index 888cf416687..38ed06f443a 100644
--- a/src/test/regress/expected/partition_pruning.out
+++ b/src/test/regress/expected/partition_pruning.out
@@ -488,8 +488,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 <> 10 ORDER BY col2,col3 LIMIT 5;
                ->  Incremental Sort  (cost=135.46..2022.12 rows=15 width=12)
                      Sort Key: pt_lt_tab.col2, pt_lt_tab.col3
                      Presorted Key: pt_lt_tab.col2
-                     ->  Merge Append  (cost=0.73..2021.44 rows=15 width=12)
-                           Sort Key: pt_lt_tab.col2
+                     ->  Append  (cost=0.68..2021.21 rows=15 width=12)
                            ->  Index Scan using pt_lt_tab_1_prt_part1_col2_idx on pt_lt_tab_1_prt_part1 pt_lt_tab_1  (cost=0.14..404.23 rows=3 width=12)
                                  Filter: (col2 <> '10'::numeric)
                            ->  Index Scan using pt_lt_tab_1_prt_part2_col2_idx on pt_lt_tab_1_prt_part2 pt_lt_tab_2  (cost=0.14..404.23 rows=3 width=12)
@@ -501,7 +500,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 <> 10 ORDER BY col2,col3 LIMIT 5;
                            ->  Index Scan using pt_lt_tab_1_prt_part5_col2_idx on pt_lt_tab_1_prt_part5 pt_lt_tab_5  (cost=0.14..404.23 rows=3 width=12)
                                  Filter: (col2 <> '10'::numeric)
  Optimizer: Postgres query optimizer
-(20 rows)
+(19 rows)
 
 SELECT * FROM pt_lt_tab WHERE col2 > 10 AND col2 < 50 ORDER BY col2,col3 LIMIT 5;
  col1 | col2 | col3 | col4 
@@ -523,8 +522,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 > 10 AND col2 < 50 ORDER BY col2,col3
                ->  Incremental Sort  (cost=122.12..1577.88 rows=13 width=12)
                      Sort Key: pt_lt_tab.col2, pt_lt_tab.col3
                      Presorted Key: pt_lt_tab.col2
-                     ->  Merge Append  (cost=0.58..1577.29 rows=13 width=12)
-                           Sort Key: pt_lt_tab.col2
+                     ->  Append  (cost=0.54..1577.12 rows=13 width=12)
                            ->  Index Scan using pt_lt_tab_1_prt_part2_col2_idx on pt_lt_tab_1_prt_part2 pt_lt_tab_1  (cost=0.14..404.27 rows=3 width=12)
                                  Index Cond: ((col2 > '10'::numeric) AND (col2 < '50'::numeric))
                            ->  Index Scan using pt_lt_tab_1_prt_part3_col2_idx on pt_lt_tab_1_prt_part3 pt_lt_tab_2  (cost=0.14..404.27 rows=3 width=12)
@@ -556,8 +554,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 > 10 OR col2 = 50 ORDER BY col2,col3
                ->  Incremental Sort  (cost=121.96..1617.78 rows=13 width=12)
                      Sort Key: pt_lt_tab.col2, pt_lt_tab.col3
                      Presorted Key: pt_lt_tab.col2
-                     ->  Merge Append  (cost=0.58..1617.18 rows=13 width=12)
-                           Sort Key: pt_lt_tab.col2
+                     ->  Append  (cost=0.54..1617.01 rows=13 width=12)
                            ->  Index Scan using pt_lt_tab_1_prt_part2_col2_idx on pt_lt_tab_1_prt_part2 pt_lt_tab_1  (cost=0.14..404.24 rows=3 width=12)
                                  Filter: ((col2 > '10'::numeric) OR (col2 = '50'::numeric))
                            ->  Index Scan using pt_lt_tab_1_prt_part3_col2_idx on pt_lt_tab_1_prt_part3 pt_lt_tab_2  (cost=0.14..404.24 rows=3 width=12)
@@ -589,8 +586,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 between 10 AND 50 ORDER BY col2,col3
                ->  Incremental Sort  (cost=116.85..1662.17 rows=14 width=12)
                      Sort Key: pt_lt_tab.col2, pt_lt_tab.col3
                      Presorted Key: pt_lt_tab.col2
-                     ->  Merge Append  (cost=0.73..1661.52 rows=14 width=12)
-                           Sort Key: pt_lt_tab.col2
+                     ->  Append  (cost=0.68..1661.30 rows=14 width=12)
                            ->  Index Scan using pt_lt_tab_1_prt_part1_col2_idx on pt_lt_tab_1_prt_part1 pt_lt_tab_1  (cost=0.14..44.15 rows=1 width=12)
                                  Index Cond: ((col2 >= '10'::numeric) AND (col2 <= '50'::numeric))
                            ->  Index Scan using pt_lt_tab_1_prt_part2_col2_idx on pt_lt_tab_1_prt_part2 pt_lt_tab_2  (cost=0.14..404.27 rows=3 width=12)
@@ -736,8 +732,7 @@ EXPLAIN SELECT * FROM pt_lt_tab WHERE col2 > 10 AND col1 = 10 ORDER BY col2,col3
                ->  Incremental Sort  (cost=44.62..176.90 rows=4 width=12)
                      Sort Key: pt_lt_tab.col2, pt_lt_tab.col3
                      Presorted Key: pt_lt_tab.col2
-                     ->  Merge Append  (cost=0.58..176.72 rows=4 width=12)
-                           Sort Key: pt_lt_tab.col2
+                     ->  Append  (cost=0.54..176.64 rows=4 width=12)
                            ->  Index Scan using pt_lt_tab_1_prt_part2_col1_col2_idx on pt_lt_tab_1_prt_part2 pt_lt_tab_1  (cost=0.14..44.15 rows=1 width=12)
                                  Index Cond: ((col1 = 10) AND (col2 > '10'::numeric))
                            ->  Index Scan using pt_lt_tab_1_prt_part3_col1_col2_idx on pt_lt_tab_1_prt_part3 pt_lt_tab_2  (cost=0.14..44.15 rows=1 width=12)
diff --git a/src/test/regress/expected/qp_join_universal.out b/src/test/regress/expected/qp_join_universal.out
index 294b8b7735c..f44ed7ef075 100644
--- a/src/test/regress/expected/qp_join_universal.out
+++ b/src/test/regress/expected/qp_join_universal.out
@@ -221,11 +221,11 @@ explain (analyze, costs off, timing off, summary off) select * from part join un
    ->  Hash Join (actual rows=150 loops=1)
          Hash Cond: (part.c2 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
          Extra Text: (seg0)   Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
-         ->  Append (actual rows=245 loops=1)
+         ->  Append (actual rows=340 loops=1)
                Partition Selectors: $0
                ->  Seq Scan on part_1_prt_part3 part_1 (actual rows=106 loops=1)
                ->  Seq Scan on part_1_prt_part1 part_2 (actual rows=150 loops=1)
-               ->  Seq Scan on part_1_prt_part2 part_3 (never executed)
+               ->  Seq Scan on part_1_prt_part2 part_3 (actual rows=113 loops=1)
          ->  Hash (actual rows=7 loops=1)
                Buckets: 524288  Batches: 1  Memory Usage: 4097kB
                ->  Partition Selector (selector id: $0) (actual rows=7 loops=1)
@@ -350,54 +350,54 @@ explain (analyze, costs off, timing off, summary off) select * from gen_series l
 -- it needs to be deduplicated. This is achieved by a hash
 -- filter (duplicate-sensitive hash motion).
 explain (analyze, costs off, timing off, summary off) select * from const_tvf(1) ct(c1) where not exists (select 1 from dist where dist.c1 = ct.c1);
-                                     QUERY PLAN
-------------------------------------------------------------------------------------
+                                        QUERY PLAN
+------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=0 loops=1)
-   ->  Hash Anti Join (actual rows=0 loops=1)
-         Hash Cond: (($0) = dist.c1)
-         ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=1 loops=1)
-               Hash Key: ($0)
-               ->  Result (actual rows=1 loops=1)
-                     InitPlan 1 (returns $0)  (slice3)
-                       ->  Result (actual rows=1 loops=1)
-         ->  Hash (actual rows=340 loops=1)
-               Buckets: 524288  Batches: 1  Memory Usage: 4108kB
-               ->  Seq Scan on dist (actual rows=340 loops=1)
+   ->  Hash Right Anti Join (actual rows=0 loops=1)
+         Hash Cond: (dist.c1 = ($0))
+         ->  Seq Scan on dist (actual rows=322 loops=1)
+         ->  Hash (actual rows=1 loops=1)
+               Buckets: 524288  Batches: 1  Memory Usage: 4097kB
+               ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=1 loops=1)
+                     Hash Key: ($0)
+                     ->  Result (actual rows=1 loops=1)
+                           InitPlan 1 (returns $0)  (slice3)
+                             ->  Result (actual rows=1 loops=1)
  Optimizer: Postgres query optimizer
 (12 rows)
 
 explain (analyze, costs off, timing off, summary off) select * from unnest_arr where not exists (select 1 from dist where dist.c1 = unnest_arr.c1);
-                                         QUERY PLAN
----------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+-------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=4 loops=1)
-   ->  Hash Anti Join (actual rows=3 loops=1)
-         Hash Cond: ((((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer) = dist.c1)
-         Extra Text: (seg1)   Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
-         ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=4 loops=1)
-               Hash Key: (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer)
-               ->  Result (actual rows=7 loops=1)
-                     ->  ProjectSet (actual rows=7 loops=1)
-                           ->  Result (actual rows=1 loops=1)
-         ->  Hash (actual rows=340 loops=1)
-               Buckets: 524288  Batches: 1  Memory Usage: 4108kB
-               ->  Seq Scan on dist (actual rows=340 loops=1)
+   ->  Hash Right Anti Join (actual rows=3 loops=1)
+         Hash Cond: (dist.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
+         Extra Text: (seg1)   Hash chain length 1.0 avg, 1 max, using 4 of 524288 buckets.
+         ->  Seq Scan on dist (actual rows=340 loops=1)
+         ->  Hash (actual rows=4 loops=1)
+               Buckets: 524288  Batches: 1  Memory Usage: 4097kB
+               ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=4 loops=1)
+                     Hash Key: (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer)
+                     ->  Result (actual rows=7 loops=1)
+                           ->  ProjectSet (actual rows=7 loops=1)
+                                 ->  Result (actual rows=1 loops=1)
  Optimizer: Postgres query optimizer
 (13 rows)
 
 explain (analyze, costs off, timing off, summary off) select * from gen_series where not exists (select 1 from dist where dist.c1 = gen_series.c1);
-                                         QUERY PLAN
----------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+-------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=11 loops=1)
-   ->  Hash Anti Join (actual rows=6 loops=1)
-         Hash Cond: ((generate_series('-10'::integer, 10)) = dist.c1)
-         Extra Text: (seg1)   Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
-         ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=8 loops=1)
-               Hash Key: (generate_series('-10'::integer, 10))
-               ->  ProjectSet (actual rows=21 loops=1)
-                     ->  Result (actual rows=1 loops=1)
-         ->  Hash (actual rows=340 loops=1)
-               Buckets: 524288  Batches: 1  Memory Usage: 4108kB
-               ->  Seq Scan on dist (actual rows=340 loops=1)
+   ->  Hash Right Anti Join (actual rows=6 loops=1)
+         Hash Cond: (dist.c1 = (generate_series('-10'::integer, 10)))
+         Extra Text: (seg1)   Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
+         ->  Seq Scan on dist (actual rows=340 loops=1)
+         ->  Hash (actual rows=8 loops=1)
+               Buckets: 524288  Batches: 1  Memory Usage: 4097kB
+               ->  Redistribute Motion 1:3  (slice2; segments: 1) (actual rows=8 loops=1)
+                     Hash Key: (generate_series('-10'::integer, 10))
+                     ->  ProjectSet (actual rows=21 loops=1)
+                           ->  Result (actual rows=1 loops=1)
  Optimizer: Postgres query optimizer
 (12 rows)
 
diff --git a/src/test/regress/expected/qp_rowsecurity.out b/src/test/regress/expected/qp_rowsecurity.out
index bd578cb94aa..dd3a43d3122 100644
--- a/src/test/regress/expected/qp_rowsecurity.out
+++ b/src/test/regress/expected/qp_rowsecurity.out
@@ -880,7 +880,7 @@ explain (costs off) update foo_rls set a=-10 where c=4;
 ---------------------------------------------------------------
  Update on foo_rls
    ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)
-         ->  Split
+         ->  Split Update
                ->  Seq Scan on foo_rls
                      Filter: ((c >= 3) AND (c = 4))
  Optimizer: Postgres-based planner
@@ -911,7 +911,7 @@ explain (costs off) update foo_part_rls set a=10 where foo_part_rls.c = 800;
    Update on foo_part_rls_1_prt_4 foo_part_rls_4
    Update on foo_part_rls_1_prt_5 foo_part_rls_5
    ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)
-         ->  Split
+         ->  Split Update
                ->  Append
                      ->  Seq Scan on foo_part_rls_1_prt_1 foo_part_rls_1
                            Filter: ((c >= 500) AND (c = 800))

