This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new 1a05b9bb7ba Fix some answer files
1a05b9bb7ba is described below

commit 1a05b9bb7ba86bf7abab665a8cafa621322b36be
Author: Jinbao Chen <[email protected]>
AuthorDate: Sun Nov 16 23:28:17 2025 +0800

    Fix some answer files
---
 src/test/regress/expected/arrays.out       |  7 ---
 src/test/regress/expected/btree_index.out  |  3 -
 src/test/regress/expected/hash_index.out   |  6 ++
 src/test/regress/expected/portals.out      | 99 +++---------------------------
 src/test/regress/expected/transactions.out | 12 +---
 src/test/regress/serial_schedule           | 15 +++--
 src/test/regress/sql/arrays.sql            |  3 -
 src/test/regress/sql/btree_index.sql       |  4 +-
 src/test/regress/sql/portals.sql           |  8 +--
 9 files changed, 26 insertions(+), 131 deletions(-)

diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index a58f704b5d6..20b401a9ef9 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -11,10 +11,7 @@ CREATE TABLE arrtest (
        e                       float8[],
        f                       char(5)[],
        g                       varchar(5)[]
-<<<<<<< HEAD
 ) DISTRIBUTED RANDOMLY;
-=======
-);
 CREATE TABLE array_op_test (
        seqno           int4,
        i                       int4[],
@@ -23,7 +20,6 @@ CREATE TABLE array_op_test (
 \set filename :abs_srcdir '/data/array.data'
 COPY array_op_test FROM :'filename';
 ANALYZE array_op_test;
->>>>>>> REL_16_9
 --
 -- only the 'e' array is 0-based, the others are 1-based.
 --
@@ -1451,13 +1447,10 @@ update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
 ERROR:  54000
 update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
 ERROR:  54000
-<<<<<<< HEAD
-=======
 insert into arr_pk_tbl(pk, f1[0:2147483647]) values (2, '{}');
 ERROR:  54000
 insert into arr_pk_tbl(pk, f1[-2147483648:2147483647]) values (2, '{}');
 ERROR:  54000
->>>>>>> REL_16_9
 -- also exercise the expanded-array case
 do $$ declare a int[];
 begin
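
The first hunk above keeps the GPDB-style distribution clause while adopting the upstream REL_16_9 lines around it. A minimal sketch of that clause, with a hypothetical table name (not part of this commit):

    -- DISTRIBUTED RANDOMLY is a Cloudberry/GPDB extension to CREATE TABLE;
    -- it spreads rows across segments instead of hashing a key column
    CREATE TABLE arr_sketch (a int, b int[]) DISTRIBUTED RANDOMLY;
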
diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out
index c85283184e5..1ad5c115e00 100644
--- a/src/test/regress/expected/btree_index.out
+++ b/src/test/regress/expected/btree_index.out
@@ -400,7 +400,6 @@ VACUUM delete_test_table;
 -- The vacuum above should've turned the leaf page into a fast root. We just
 -- need to insert some rows to cause the fast root page to split.
 INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,1000) i;
-<<<<<<< HEAD
 --
 -- GPDB: Test correctness of B-tree stats in consecutively VACUUM.
 --
@@ -510,7 +509,6 @@ SELECT reltuples FROM pg_class WHERE relname='btree_stats_idx';
          2
 (1 row)
 
-=======
 -- Test unsupported btree opclass parameters
 create index on btree_tall_tbl (id int4_ops(foo=1));
 ERROR:  operator class int4_ops has no options
@@ -528,4 +526,3 @@ ALTER INDEX btree_part_idx ALTER COLUMN id SET (n_distinct=100);
 ERROR:  ALTER action ALTER COLUMN ... SET cannot be performed on relation "btree_part_idx"
 DETAIL:  This operation is not supported for partitioned indexes.
 DROP TABLE btree_part;
->>>>>>> REL_16_9
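
On the opclass-parameter error above: only operator classes that define parameters accept that syntax, and int4_ops defines none. A hedged sketch with a hypothetical table, using the core GiST tsvector opclass, which does take a siglen parameter (assuming the PostgreSQL 13+ behavior carries over):

    -- tsvector_ops accepts siglen, so the same syntax succeeds here
    CREATE TABLE ts_sketch (v tsvector);
    CREATE INDEX ts_sketch_idx ON ts_sketch USING gist (v tsvector_ops(siglen = 100));
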
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index 7ead0fc380e..c6d731598d6 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -46,8 +46,14 @@ CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops)
 -- tables that already contain data.
 --
 create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
+ERROR:  UNIQUE index must contain all columns in the table's distribution key
+DETAIL:  Distribution key column "seqno" is not included in the constraint.
 create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
+ERROR:  UNIQUE index must contain all columns in the table's distribution key
+DETAIL:  Distribution key column "seqno" is not included in the constraint.
 create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
+ERROR:  UNIQUE index must contain all columns in the table's distribution key
+DETAIL:  Distribution key column "seqno" is not included in the constraint.
 --
 -- hash index
 -- grep 843938989 hash.data
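
The three new errors above reflect Cloudberry's rule that a UNIQUE index must cover every column of the table's distribution key; per the DETAIL lines, that key is "seqno" here. A minimal sketch of an index the rule would accept (the index name is hypothetical, not part of this commit):

    -- including the distribution key column makes the unique index legal
    create unique index hash_f8_index_ok on hash_f8_heap (seqno, random);
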
diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out
index 6720c09aa2b..93753d4c164 100644
--- a/src/test/regress/expected/portals.out
+++ b/src/test/regress/expected/portals.out
@@ -483,8 +483,7 @@ FETCH ABSOLUTE 2 FROM foo24; -- allowed
 (1 row)
 
 FETCH ABSOLUTE 1 FROM foo24; -- should fail
-ERROR:  cursor can only scan forward
-HINT:  Declare it with SCROLL option to enable backward scan.
+ERROR:  backward scan is not supported in this version of Apache Cloudberry
 END;
 --
 -- Cursors outside transaction blocks
@@ -525,65 +524,39 @@ SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors;
 
 CLOSE foo25;
 BEGIN;
-<<<<<<< HEAD
 DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2 ORDER BY 1,2,3,4;
 FETCH FROM foo25ns;
  unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
 ---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
        0 |    9998 |   0 |    0 |   0 |      0 |       0 |        0 |           0 |         0 |        0 |   0 |    1 | AAAAAA   | OUOAAA   | OOOOxx
-=======
-DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2;
-FETCH FROM foo25ns;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
->>>>>>> REL_16_9
 (1 row)
 
 FETCH FROM foo25ns;
  unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
 ---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-<<<<<<< HEAD
        1 |    2838 |   1 |    1 |   1 |      1 |       1 |        1 |           1 |         1 |        1 |   2 |    3 | BAAAAA   | EFEAAA   | OOOOxx
-=======
-    1891 |       1 |   1 |    3 |   1 |     11 |      91 |      891 |        1891 |      1891 |     1891 | 182 |  183 | TUAAAA   | BAAAAA   | HHHHxx
->>>>>>> REL_16_9
 (1 row)
 
 COMMIT;
 FETCH FROM foo25ns;
  unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
 ---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-<<<<<<< HEAD
        2 |    2716 |   0 |    2 |   2 |      2 |       2 |        2 |           2 |         2 |        2 |   4 |    5 | CAAAAA   | MAEAAA   | AAAAxx
-=======
-    3420 |       2 |   0 |    0 |   0 |      0 |      20 |      420 |        1420 |      3420 |     3420 |  40 |   41 | OBAAAA   | CAAAAA   | OOOOxx
->>>>>>> REL_16_9
 (1 row)
 
 FETCH ABSOLUTE 4 FROM foo25ns;
  unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
 ---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-<<<<<<< HEAD
        3 |    5679 |   1 |    3 |   3 |      3 |       3 |        3 |           3 |         3 |        3 |   6 |    7 | DAAAAA   | LKIAAA   | VVVVxx
-=======
-    9850 |       3 |   0 |    2 |   0 |     10 |      50 |      850 |        1850 |      4850 |     9850 | 100 |  101 | WOAAAA   | DAAAAA   | VVVVxx
->>>>>>> REL_16_9
 (1 row)
 
 FETCH ABSOLUTE 4 FROM foo25ns; -- fail
 ERROR:  cursor can only scan forward
 HINT:  Declare it with SCROLL option to enable backward scan.
 SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors;
-<<<<<<< HEAD
   name   |                                      statement                                       | is_holdable | is_binary | is_scrollable 
 ---------+--------------------------------------------------------------------------------------+-------------+-----------+---------------
  foo25ns | DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2 ORDER BY 1,2,3,4; | t           | f         | f
-=======
-  name   |                              statement                               | is_holdable | is_binary | is_scrollable 
----------+---------------------------------------------------------------------+-------------+-----------+---------------
- foo25ns | DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; | t           | f         | f
->>>>>>> REL_16_9
 (1 row)
 
 CLOSE foo25ns;
@@ -648,12 +621,6 @@ insert into tt1 values(1);
 declare c1 cursor for select count_tt1_v(), count_tt1_s();
 insert into tt1 values(2);
 -- fetch all from c1; -- DISABLED: see open JIRA MPP-835
--- start_ignore
- count_tt1_v | count_tt1_s 
--------------+-------------
-           2 |           1
-(1 row)
--- end_ignore
 rollback;
 begin;
 insert into tt1 values(1);
@@ -662,12 +629,6 @@ insert into tt1 values(2);
 commit;
 delete from tt1;
 -- fetch all from c2; -- DISABLED: see open JIRA MPP-835
--- start_ignore
- count_tt1_v | count_tt1_s 
--------------+-------------
-           2 |           1
-(1 row)
--- end_ignore
 drop function count_tt1_v();
 drop function count_tt1_s();
 -- Create a cursor with the BINARY option and check the pg_cursors view
@@ -1142,62 +1103,23 @@ DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check;
 FETCH ABSOLUTE 12 FROM c1;
  currentid | payload 
 -----------+---------
-        12 | P12
+         7 | p7
 (1 row)
 
 FETCH ABSOLUTE 8 FROM c1;
- currentid | payload 
------------+---------
-         8 | p8
-(1 row)
-
+ERROR:  cursor can only scan forward
+HINT:  Declare it with SCROLL option to enable backward scan.
 DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *;
- currentid | payload 
------------+---------
-         8 | p8
-(1 row)
-
+ERROR:  current transaction is aborted, commands ignored until end of transaction block
 -- This tests the ExecutorRewind code path
 FETCH ABSOLUTE 13 FROM c1;
- currentid | payload 
------------+---------
-        13 | P13
-(1 row)
-
+ERROR:  current transaction is aborted, commands ignored until end of transaction block
 FETCH ABSOLUTE 1 FROM c1;
- currentid | payload 
------------+---------
-         1 | p1
-(1 row)
-
+ERROR:  current transaction is aborted, commands ignored until end of transaction block
 DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *;
- currentid | payload 
------------+---------
-         1 | p1
-(1 row)
-
+ERROR:  current transaction is aborted, commands ignored until end of transaction block
 SELECT * FROM current_check;
- currentid | payload 
------------+---------
-         2 | p2
-         3 | p3
-         4 | p4
-         5 | p5
-         6 | p6
-         7 | p7
-         9 | p9
-        10 | P10
-        11 | P11
-        12 | P12
-        13 | P13
-        14 | P14
-        15 | P15
-        16 | P16
-        17 | P17
-        18 | P18
-        19 | P19
-(17 rows)
-
+ERROR:  current transaction is aborted, commands ignored until end of transaction block
 ROLLBACK;
 -- end_ignore
 -- Make sure snapshot management works okay, per bug report in
@@ -1314,7 +1236,6 @@ fetch all in c2;
 fetch backward all in c2;
 ERROR:  backward scan is not supported in this version of Apache Cloudberry
 rollback;
-<<<<<<< HEAD
 -- gpdb: Test executor should return NULL directly during commit for holdable
 -- cursor if previously executor has emitted all tuples. We've seen two issues
 -- below.
@@ -1355,7 +1276,6 @@ FETCH ALL FROM foo2;
 (0 rows)
 
 CLOSE foo2;
-=======
 -- Check fetching of toasted datums via cursors.
 begin;
 -- Other compression algorithms may cause the compressed data to be stored
@@ -1381,4 +1301,3 @@ fetch all in held_portal;
 (1 row)
 
 reset default_toast_compression;
->>>>>>> REL_16_9
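
Several hunks above replace backward-fetch output with errors because this version of Apache Cloudberry does not support backward scans, even on SCROLL cursors, and once a fetch fails the rest of the transaction aborts. A minimal sketch of the pattern (cursor name is hypothetical; the second fetch raises one of the backward-scan errors shown above):

    begin;
    declare c_sketch no scroll cursor for select i from generate_series(1, 10) i;
    fetch absolute 2 from c_sketch;  -- ok: position moves forward to row 2
    fetch absolute 1 from c_sketch;  -- ERROR: cursor can only scan forward
    rollback;
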
diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out
index c9444365f12..bae1cc27ee6 100644
--- a/src/test/regress/expected/transactions.out
+++ b/src/test/regress/expected/transactions.out
@@ -632,21 +632,13 @@ fetch from foo;
 ERROR:  cursor "foo" does not exist
 commit;
 begin;
-<<<<<<< HEAD
-create table abc (a int);
-insert into abc values (5);
-insert into abc values (10);
-insert into abc values (15);
-declare foo cursor for select * from abc;
--- CBDB: the order of value is not guaranteed
--- start_ignore
-=======
 create table trans_abc (a int);
 insert into trans_abc values (5);
 insert into trans_abc values (10);
 insert into trans_abc values (15);
 declare foo cursor for select * from trans_abc;
->>>>>>> REL_16_9
+-- CBDB: the order of value is not guaranteed
+-- start_ignore
 fetch from foo;
  a 
 ---
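
The re-added start_ignore block exists because, on a multi-segment cluster, the cursor can return the rows 5, 10, 15 in any order. A hypothetical alternative (not what the test does) would pin the order down instead:

    -- forcing a deterministic order would make the output stable
    declare foo cursor for select * from trans_abc order by a;
    fetch from foo;  -- now always returns a = 5 first
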
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 84bbfc2b24d..48bd03dc8a3 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -94,14 +94,13 @@ test: union
 test: case
 test: join
 test: aggregates
-# test: transactions
-# ignore: random
-# test: random
-# test: portals
-# test: arrays
-# test: btree_index
-# test: hash_index
-# test: update
+test: transactions
+test: random
+test: portals
+test: arrays
+test: btree_index
+test: hash_index
+test: update
 # test: delete
 # test: namespace
 # ignore: prepared_xacts
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index 37d55cf36ce..aabbd6f92de 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -452,11 +452,8 @@ reset enable_bitmapscan;
 insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
 update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
 update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10;
-<<<<<<< HEAD
-=======
 insert into arr_pk_tbl(pk, f1[0:2147483647]) values (2, '{}');
 insert into arr_pk_tbl(pk, f1[-2147483648:2147483647]) values (2, '{}');
->>>>>>> REL_16_9
 
 -- also exercise the expanded-array case
 do $$ declare a int[];
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
index 1debebd838d..d6dc0c643b2 100644
--- a/src/test/regress/sql/btree_index.sql
+++ b/src/test/regress/sql/btree_index.sql
@@ -238,7 +238,6 @@ VACUUM delete_test_table;
 -- need to insert some rows to cause the fast root page to split.
 INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,1000) i;
 
-<<<<<<< HEAD
 --
 -- GPDB: Test correctness of B-tree stats in consecutively VACUUM.
 --
@@ -281,7 +280,7 @@ SELECT reltuples FROM pg_class WHERE relname='btree_stats_tbl';
 -- inspect the state of the stats on segments
 SELECT gp_segment_id, relname, reltuples FROM gp_dist_random('pg_class') WHERE relname = 'btree_stats_idx';
 SELECT reltuples FROM pg_class WHERE relname='btree_stats_idx';
-=======
+
 -- Test unsupported btree opclass parameters
 create index on btree_tall_tbl (id int4_ops(foo=1));
 
@@ -295,4 +294,3 @@ CREATE TABLE btree_part (id int4) PARTITION BY RANGE (id);
 CREATE INDEX btree_part_idx ON btree_part(id);
 ALTER INDEX btree_part_idx ALTER COLUMN id SET (n_distinct=100);
 DROP TABLE btree_part;
->>>>>>> REL_16_9
diff --git a/src/test/regress/sql/portals.sql b/src/test/regress/sql/portals.sql
index 57a0cf58e53..75e8930b8ae 100644
--- a/src/test/regress/sql/portals.sql
+++ b/src/test/regress/sql/portals.sql
@@ -187,11 +187,7 @@ CLOSE foo25;
 
 BEGIN;
 
-<<<<<<< HEAD
 DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2 ORDER BY 1,2,3,4;
-=======
-DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2;
->>>>>>> REL_16_9
 
 FETCH FROM foo25ns;
 
@@ -579,7 +575,6 @@ fetch all in c2;
 fetch backward all in c2;
 rollback;
 
-<<<<<<< HEAD
 -- gpdb: Test executor should return NULL directly during commit for holdable
 -- cursor if previously executor has emitted all tuples. We've seen two issues
 -- below.
@@ -603,7 +598,7 @@ FETCH ALL FROM foo2;
 COMMIT;
 FETCH ALL FROM foo2;
 CLOSE foo2;
-=======
+
 -- Check fetching of toasted datums via cursors.
 begin;
 
@@ -627,4 +622,3 @@ drop table toasted_data;
 fetch all in held_portal;
 
 reset default_toast_compression;
->>>>>>> REL_16_9


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
