This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new c70e7a58d22 Fix some answer files
c70e7a58d22 is described below

commit c70e7a58d225cd452f714efa639f142922cae24b
Author: Jinbao Chen <[email protected]>
AuthorDate: Fri Dec 26 11:16:16 2025 +0800

    Fix some answer files
---
 src/test/regress/expected/gp_aggregates.out       | 18 +++++----
 src/test/regress/expected/gpcopy.out              |  9 ++++-
 src/test/regress/expected/partition.out           | 45 +++++++++++------------
 src/test/regress/expected/partition_aggregate.out | 14 +++++--
 src/test/regress/expected/percentile.out          | 24 ++++++------
 src/test/regress/expected/rowsecurity.out         |  1 +
 src/test/regress/expected/segspace.out            |  2 +-
 7 files changed, 63 insertions(+), 50 deletions(-)

diff --git a/src/test/regress/expected/gp_aggregates.out b/src/test/regress/expected/gp_aggregates.out
index 5b2206fb8fa..b4760a78540 100644
--- a/src/test/regress/expected/gp_aggregates.out
+++ b/src/test/regress/expected/gp_aggregates.out
@@ -373,20 +373,22 @@ insert into multiagg_with_subquery select i, i+1, i+2, i+3 from generate_series(
 analyze multiagg_with_subquery;
 explain (costs off)
 select count(distinct j), count(distinct k), count(distinct m) from (select j,k,m from multiagg_with_subquery group by j,k,m ) sub group by j;
-                                                    QUERY PLAN
-------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
    ->  GroupAggregate
          Group Key: multiagg_with_subquery.j
          ->  Sort
                Sort Key: multiagg_with_subquery.j, multiagg_with_subquery.k
-               ->  HashAggregate
-                     Group Key: multiagg_with_subquery.j, multiagg_with_subquery.k, multiagg_with_subquery.m
-                     ->  Redistribute Motion 3:3  (slice2; segments: 3)
-                           Hash Key: multiagg_with_subquery.j, multiagg_with_subquery.k, multiagg_with_subquery.m
-                           ->  Seq Scan on multiagg_with_subquery
+               ->  Redistribute Motion 3:3  (slice2; segments: 3)
+                     Hash Key: multiagg_with_subquery.j
+                     ->  HashAggregate
+                           Group Key: multiagg_with_subquery.j, multiagg_with_subquery.k, multiagg_with_subquery.m
+                           ->  Redistribute Motion 3:3  (slice3; segments: 3)
+                                 Hash Key: multiagg_with_subquery.j, multiagg_with_subquery.k, multiagg_with_subquery.m
+                                 ->  Seq Scan on multiagg_with_subquery
  Optimizer: Postgres query optimizer
-(11 rows)
+(13 rows)
 
 select count(distinct j), count(distinct k), count(distinct m) from (select j,k,m from multiagg_with_subquery group by j,k,m ) sub group by j;
  count | count | count 
diff --git a/src/test/regress/expected/gpcopy.out b/src/test/regress/expected/gpcopy.out
index 4b04b043a3a..883f2838cb6 100755
--- a/src/test/regress/expected/gpcopy.out
+++ b/src/test/regress/expected/gpcopy.out
@@ -482,6 +482,7 @@ COPY copy_regression_newline from stdin with delimiter '|' newline 'cr' csv;
 ERROR:  extra data after last expected column
 CONTEXT:  COPY xxxxx line x: xxx
 2|2
+\.
 "
 -- negative: invalid newline
 COPY copy_regression_newline from stdin with delimiter '|' newline 'blah';
@@ -1043,7 +1044,9 @@ CREATE TABLE LINEITEM_5 (LIKE LINEITEM);
 CREATE TABLE LINEITEM_6 (LIKE LINEITEM);
 CREATE TABLE LINEITEM_7 (LIKE LINEITEM);
 CREATE TABLE LINEITEM_8 (LIKE LINEITEM);
-COPY LINEITEM FROM '@abs_srcdir@/data/lineitem.csv' WITH DELIMITER '|' CSV;
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set lineitem_csv :abs_srcdir '/data/lineitem.csv'
+COPY LINEITEM FROM :'lineitem_csv' WITH DELIMITER '|' CSV;
 ANALYZE LINEITEM;
 SELECT COUNT(*) FROM LINEITEM;
  count 
@@ -1586,9 +1589,11 @@ CREATE FUNCTION broken_int4in(cstring)
    AS 'int4in'
    LANGUAGE internal IMMUTABLE STRICT;
 NOTICE:  return type broken_int4 is only a shell
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
 CREATE FUNCTION broken_int4out(broken_int4)
    RETURNS cstring
-   AS '@abs_builddir@/regress@DLSUFFIX@', 'broken_int4out'
+   AS :'regress_dll', 'broken_int4out'
    LANGUAGE C IMMUTABLE STRICT;
 NOTICE:  argument type broken_int4 is only a shell
 CREATE TYPE broken_int4 (
diff --git a/src/test/regress/expected/partition.out b/src/test/regress/expected/partition.out
index ae7dc6b2b2a..32fed2e7f95 100755
--- a/src/test/regress/expected/partition.out
+++ b/src/test/regress/expected/partition.out
@@ -4872,30 +4872,29 @@ analyze bar;
 set optimizer_segments = 3;
 set optimizer_nestloop_factor = 1.0;
 explain select foo_p.b, foo_p.t from foo_p left outer join bar on foo_p.a = bar.k  where foo_p.t is not null and foo_p.a = (array[1])[1];
-                                            QUERY PLAN
---------------------------------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=10.28..366.35 rows=13416 width=31)
-   ->  Hash Right Join  (cost=10.28..187.47 rows=4472 width=31)
-         Hash Cond: (bar.k = foo_p.a)
-         ->  Broadcast Motion 3:3  (slice2; segments: 3)  (cost=0.00..126.56 rows=2236 width=4)
-               ->  Seq Scan on bar  (cost=0.00..96.75 rows=745 width=4)
-                     Filter: (k = 1)
-         ->  Hash  (cost=10.21..10.21 rows=6 width=35)
-               ->  Append  (cost=0.00..10.21 rows=6 width=35)
-                     ->  Seq Scan on foo_p_1_prt_2 foo_p_1  (cost=0.00..1.83 rows=1 width=34)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
-                     ->  Seq Scan on foo_p_1_prt_3 foo_p_2  (cost=0.00..1.83 rows=1 width=34)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
-                     ->  Seq Scan on foo_p_1_prt_4 foo_p_3  (cost=0.00..1.83 rows=1 width=34)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
-                     ->  Seq Scan on foo_p_1_prt_5 foo_p_4  (cost=0.00..1.83 rows=1 width=34)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
-                     ->  Seq Scan on foo_p_1_prt_6 foo_p_5  (cost=0.00..1.83 rows=1 width=34)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
-                     ->  Seq Scan on foo_p_1_prt_other foo_p_6  (cost=0.00..1.01 rows=1 width=40)
-                           Filter: ((t IS NOT NULL) AND (a = 1))
+                                              QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=10000000000.00..10000001082.63 rows=13416 width=31)
+   ->  Nested Loop Left Join  (cost=10000000000.00..10000000903.75 rows=4472 width=31)
+         ->  Append  (cost=0.00..10.21 rows=6 width=35)
+               ->  Seq Scan on foo_p_1_prt_2 foo_p_1  (cost=0.00..1.83 rows=1 width=34)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+               ->  Seq Scan on foo_p_1_prt_3 foo_p_2  (cost=0.00..1.83 rows=1 width=34)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+               ->  Seq Scan on foo_p_1_prt_4 foo_p_3  (cost=0.00..1.83 rows=1 width=34)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+               ->  Seq Scan on foo_p_1_prt_5 foo_p_4  (cost=0.00..1.83 rows=1 width=34)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+               ->  Seq Scan on foo_p_1_prt_6 foo_p_5  (cost=0.00..1.83 rows=1 width=34)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+               ->  Seq Scan on foo_p_1_prt_other foo_p_6  (cost=0.00..1.01 rows=1 width=40)
+                     Filter: ((t IS NOT NULL) AND (a = 1))
+         ->  Materialize  (cost=0.00..137.74 rows=2236 width=4)
+               ->  Broadcast Motion 3:3  (slice2; segments: 3)  (cost=0.00..126.56 rows=2236 width=4)
+                     ->  Seq Scan on bar  (cost=0.00..96.75 rows=745 width=4)
+                           Filter: (k = 1)
  Optimizer: Postgres query optimizer
-(21 rows)
+(20 rows)
 
 reset optimizer_segments;
 drop table if exists foo_p;
diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out
index 18ae4b847c8..9d0d1d4f9c1 100644
--- a/src/test/regress/expected/partition_aggregate.out
+++ b/src/test/regress/expected/partition_aggregate.out
@@ -436,19 +436,25 @@ SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2;
                      Group Key: pagg_tab.c
                      ->  Sort
                            Sort Key: pagg_tab.c, pagg_tab.a
-                           ->  Seq Scan on pagg_tab_p1 pagg_tab
+                           ->  Redistribute Motion 3:3  (slice2; segments: 3)
+                                 Hash Key: pagg_tab.c
+                                 ->  Seq Scan on pagg_tab_p1 pagg_tab
                ->  GroupAggregate
                      Group Key: pagg_tab_1.c
                      ->  Sort
                            Sort Key: pagg_tab_1.c, pagg_tab_1.a
-                           ->  Seq Scan on pagg_tab_p2 pagg_tab_1
+                           ->  Redistribute Motion 3:3  (slice3; segments: 3)
+                                 Hash Key: pagg_tab_1.c
+                                 ->  Seq Scan on pagg_tab_p2 pagg_tab_1
                ->  GroupAggregate
                      Group Key: pagg_tab_2.c
                      ->  Sort
                            Sort Key: pagg_tab_2.c, pagg_tab_2.a
-                           ->  Seq Scan on pagg_tab_p3 pagg_tab_2
+                           ->  Redistribute Motion 3:3  (slice4; segments: 3)
+                                 Hash Key: pagg_tab_2.c
+                                 ->  Seq Scan on pagg_tab_p3 pagg_tab_2
  Optimizer: Postgres query optimizer
-(21 rows)
+(27 rows)
 
 -- Since GROUP BY clause does not match with PARTITION KEY; we need to do
 -- partial aggregation. However, ORDERED SET are not partial safe and thus
diff --git a/src/test/regress/expected/percentile.out b/src/test/regress/expected/percentile.out
index 981507d01a3..3efc06a19df 100644
--- a/src/test/regress/expected/percentile.out
+++ b/src/test/regress/expected/percentile.out
@@ -513,21 +513,21 @@ select * from percv;
 (11 rows)
 
 select pg_get_viewdef('percv');
-                                                          pg_get_viewdef
------------------------------------------------------------------------------------------------------------------------------------
-  SELECT percentile_cont((0.4)::double precision) WITHIN GROUP (ORDER BY (((perct.a / 10))::double precision)) AS percentile_cont,+
-     MEDIAN(perct.a) AS "median",                                                                                                 +
-     percentile_disc((0.51)::double precision) WITHIN GROUP (ORDER BY perct.a DESC) AS percentile_disc                            +
-    FROM perct                                                                                                                    +
-   GROUP BY perct.b                                                                                                               +
-   ORDER BY perct.b;
+                                                       pg_get_viewdef
+-----------------------------------------------------------------------------------------------------------------------------
+  SELECT percentile_cont((0.4)::double precision) WITHIN GROUP (ORDER BY (((a / 10))::double precision)) AS percentile_cont,+
+     MEDIAN(a) AS "median",                                                                                                 +
+     percentile_disc((0.51)::double precision) WITHIN GROUP (ORDER BY a DESC) AS percentile_disc                            +
+    FROM perct                                                                                                              +
+   GROUP BY b                                                                                                               +
+   ORDER BY b;
 (1 row)
 
 select pg_get_viewdef('percv2');
-                pg_get_viewdef                 
------------------------------------------------
-  SELECT MEDIAN(perct.a) AS m1,               +
-     MEDIAN((perct.a)::double precision) AS m2+
+             pg_get_viewdef              
+-----------------------------------------
+  SELECT MEDIAN(a) AS m1,               +
+     MEDIAN((a)::double precision) AS m2+
     FROM perct;
 (1 row)
 
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index ec784f154bb..816c3d15c09 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -3942,6 +3942,7 @@ COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS
 ERROR:  query would be affected by row-level security policy for table "copy_rel_to"
 SET row_security TO ON;
 COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok
+2,two
 -- Check COPY TO as user with permissions and BYPASSRLS
 SET SESSION AUTHORIZATION regress_rls_exempt_user;
 SET row_security TO OFF;
diff --git a/src/test/regress/expected/segspace.out b/src/test/regress/expected/segspace.out
index 21cfbe034ca..b7e5ea2549b 100644
--- a/src/test/regress/expected/segspace.out
+++ b/src/test/regress/expected/segspace.out
@@ -58,7 +58,7 @@ set local enable_parallel = true;
 set local optimizer=off;
 set local min_parallel_table_scan_size=0;
 set local min_parallel_index_scan_size = 0;
-set local force_parallel_mode=1;
+set local debug_parallel_query=regress;
 EXPLAIN(COSTS OFF) SELECT t1.* FROM segspace_test_hj_skew AS t1, segspace_test_hj_skew AS t2 WHERE t1.i1=t2.i2;
                               QUERY PLAN                               
 -----------------------------------------------------------------------


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
