This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new b568a284ff8 Fix some answer files
b568a284ff8 is described below

commit b568a284ff89711ab1cf213bf730ed2751106f21
Author: Jinbao Chen <[email protected]>
AuthorDate: Wed Dec 3 22:53:43 2025 +0800

    Fix some answer files
---
 src/backend/nodes/equalfuncs.c                    |  73 +++++-
 src/backend/nodes/outfast.c                       |  15 ++
 src/backend/nodes/outfuncs.c                      |  72 ++++++
 src/backend/nodes/readfast.c                      |  15 ++
 src/backend/nodes/readfuncs.c                     |  77 ++++++
 src/test/regress/expected/aggregates.out          |   2 +-
 src/test/regress/expected/brin_bloom.out          |  17 +-
 src/test/regress/expected/brin_multi.out          | 129 +++++-----
 src/test/regress/expected/create_function_sql.out |  22 +-
 src/test/regress/expected/create_schema.out       |   6 +-
 src/test/regress/expected/gp_gin_index.out        | 157 ++++++------
 src/test/regress/expected/memoize.out             | 284 ++++------------------
 src/test/regress/expected/sanity_check.out        |   1 -
 src/test/regress/expected/select_parallel.out     | 144 +++++------
 src/test/regress/expected/sqljson.out             |   8 +-
 src/test/regress/expected/stats.out               | 200 ++++++---------
 src/test/regress/expected/tid.out                 |   5 -
 src/test/regress/expected/tsearch.out             |   1 +
 src/test/regress/expected/vacuum_parallel.out     |   7 -
 src/test/regress/parallel_schedule                |   3 +-
 src/test/regress/sql/aggregates.sql               |   2 +-
 src/test/regress/sql/create_function_sql.sql      |   4 -
 src/test/regress/sql/memoize.sql                  |  34 ---
 src/test/regress/sql/stats.sql                    |  69 ------
 src/test/regress/sql/tsearch.sql                  |   2 +
 25 files changed, 617 insertions(+), 732 deletions(-)

diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 5e33e2cbf49..eb77ea9169f 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -3674,6 +3674,63 @@ _equalMergeAction(const MergeAction *a, const MergeAction *b)
        return true;
 }
 
+static bool
+_equalJsonConstructorExpr(const JsonConstructorExpr *a, const JsonConstructorExpr *b)
+{
+       COMPARE_SCALAR_FIELD(type);
+       COMPARE_NODE_FIELD(args);
+       COMPARE_NODE_FIELD(func);
+       COMPARE_NODE_FIELD(coercion);
+       COMPARE_NODE_FIELD(returning);
+       COMPARE_SCALAR_FIELD(absent_on_null);
+       COMPARE_SCALAR_FIELD(unique);
+       COMPARE_LOCATION_FIELD(location);
+
+       return true;
+}
+
+static bool
+_equalJsonIsPredicate(const JsonIsPredicate *a, const JsonIsPredicate *b)
+{
+       COMPARE_NODE_FIELD(expr);
+       COMPARE_NODE_FIELD(format);
+       COMPARE_SCALAR_FIELD(item_type);
+       COMPARE_SCALAR_FIELD(unique_keys);
+       COMPARE_LOCATION_FIELD(location);
+
+       return true;
+}
+
+static bool
+_equalJsonReturning(const JsonReturning *a, const JsonReturning *b)
+{
+       COMPARE_NODE_FIELD(format);
+       COMPARE_SCALAR_FIELD(typid);
+       COMPARE_SCALAR_FIELD(typmod);
+
+       return true;
+}
+
+static bool
+_equalJsonValueExpr(const JsonValueExpr *a, const JsonValueExpr *b)
+{
+       COMPARE_NODE_FIELD(raw_expr);
+       COMPARE_NODE_FIELD(formatted_expr);
+       COMPARE_NODE_FIELD(format);
+
+       return true;
+}
+
+static bool
+_equalJsonFormat(const JsonFormat *a, const JsonFormat *b)
+{
+       COMPARE_SCALAR_FIELD(format_type);
+       COMPARE_SCALAR_FIELD(encoding);
+       COMPARE_LOCATION_FIELD(location);
+
+       return true;
+}
+
 /*
  * equal
  *       returns whether two nodes are equal
@@ -4571,7 +4628,21 @@ equal(const void *a, const void *b)
                case T_MergeAction:
                        retval = _equalMergeAction(a, b);
                        break;
-
+               case T_JsonConstructorExpr:
+                       retval = _equalJsonConstructorExpr(a, b);
+                       break;
+               case T_JsonIsPredicate:
+                       retval = _equalJsonIsPredicate(a, b);
+                       break;
+               case T_JsonReturning:
+                       retval = _equalJsonReturning(a, b);
+                       break;
+               case T_JsonValueExpr:
+                       retval = _equalJsonValueExpr(a, b);
+                       break;
+               case T_JsonFormat:
+                       retval = _equalJsonFormat(a, b);
+                       break;
                default:
                        elog(ERROR, "unrecognized node type: %d",
                                 (int) nodeTag(a));
diff --git a/src/backend/nodes/outfast.c b/src/backend/nodes/outfast.c
index 9b8558b105a..11bf27c08c6 100644
--- a/src/backend/nodes/outfast.c
+++ b/src/backend/nodes/outfast.c
@@ -1970,6 +1970,21 @@ _outNode(StringInfo str, void *obj)
                        case T_WindowDef:
                                _outWindowDef(str, obj);
                                break;
+                       case T_JsonConstructorExpr:
+                               _outJsonConstructorExpr(str, obj);
+                               break;
+                       case T_JsonIsPredicate:
+                               _outJsonIsPredicate(str, obj);
+                               break;
+                       case T_JsonReturning:
+                               _outJsonReturning(str, obj);
+                               break;
+                       case T_JsonValueExpr:
+                               _outJsonValueExpr(str, obj);
+                               break;
+                       case T_JsonFormat:
+                               _outJsonFormat(str, obj);
+                               break;
                        default:
                                elog(ERROR, "could not serialize unrecognized node type: %d",
                                                 (int) nodeTag(obj));
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 0c6ee6df71b..2a174d850c6 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -4341,6 +4341,63 @@ _outPublicationTable(StringInfo str, const PublicationTable *node)
        WRITE_NODE_FIELD(columns);
 }
 
+static void
+_outJsonIsPredicate(StringInfo str, const JsonIsPredicate *node)
+{
+       WRITE_NODE_TYPE("JSONISPREDICATE");
+
+       WRITE_NODE_FIELD(expr);
+       WRITE_NODE_FIELD(format);
+       WRITE_ENUM_FIELD(item_type, JsonValueType);
+       WRITE_BOOL_FIELD(unique_keys);
+       WRITE_LOCATION_FIELD(location);
+}
+
+static void
+_outJsonConstructorExpr(StringInfo str, const JsonConstructorExpr *node)
+{
+       WRITE_NODE_TYPE("JSONCONSTRUCTOREXPR");
+
+       WRITE_ENUM_FIELD(type, JsonConstructorType);
+       WRITE_NODE_FIELD(args);
+       WRITE_NODE_FIELD(func);
+       WRITE_NODE_FIELD(coercion);
+       WRITE_NODE_FIELD(returning);
+       WRITE_BOOL_FIELD(absent_on_null);
+       WRITE_BOOL_FIELD(unique);
+       WRITE_LOCATION_FIELD(location);
+}
+
+static void
+_outJsonReturning(StringInfo str, const JsonReturning *node)
+{
+       WRITE_NODE_TYPE("JSONRETURNING");
+
+       WRITE_NODE_FIELD(format);
+       WRITE_OID_FIELD(typid);
+       WRITE_INT_FIELD(typmod);
+}
+
+static void
+_outJsonValueExpr(StringInfo str, const JsonValueExpr *node)
+{
+       WRITE_NODE_TYPE("JSONVALUEEXPR");
+
+       WRITE_NODE_FIELD(raw_expr);
+       WRITE_NODE_FIELD(formatted_expr);
+       WRITE_NODE_FIELD(format);
+}
+
+static void
+_outJsonFormat(StringInfo str, const JsonFormat *node)
+{
+       WRITE_NODE_TYPE("JSONFORMAT");
+
+       WRITE_ENUM_FIELD(format_type, JsonFormatType);
+       WRITE_ENUM_FIELD(encoding, JsonEncoding);
+       WRITE_LOCATION_FIELD(location);
+}
+
 #include "outfuncs_common.c"
 #ifndef COMPILING_BINARY_FUNCS
 /*
@@ -5549,6 +5606,21 @@ outNode(StringInfo str, const void *obj)
                        case T_PublicationTable:
                                _outPublicationTable(str, obj);
                                break;
+                       case T_JsonIsPredicate:
+                               _outJsonIsPredicate(str, obj);
+                               break;
+                       case T_JsonConstructorExpr:
+                               _outJsonConstructorExpr(str, obj);
+                               break;
+                       case T_JsonReturning:
+                               _outJsonReturning(str, obj);
+                               break;
+                       case T_JsonValueExpr:
+                               _outJsonValueExpr(str, obj);
+                               break;
+                       case T_JsonFormat:
+                               _outJsonFormat(str, obj);
+                               break;
                        default:
 
                                /*
diff --git a/src/backend/nodes/readfast.c b/src/backend/nodes/readfast.c
index 80c00523bf2..36213f1862d 100644
--- a/src/backend/nodes/readfast.c
+++ b/src/backend/nodes/readfast.c
@@ -2976,6 +2976,21 @@ readNodeBinary(void)
                        case T_WindowDef:
                                return_value = _readWindowDef();
                                break;
+                       case T_JsonConstructorExpr:
+                               return_value = _readJsonConstructorExpr();
+                               break;
+                       case T_JsonIsPredicate:
+                               return_value = _readJsonIsPredicate();
+                               break;
+                       case T_JsonReturning:
+                               return_value = _readJsonReturning();
+                               break;
+                       case T_JsonValueExpr:
+                               return_value = _readJsonValueExpr();
+                               break;
+                       case T_JsonFormat:
+                               return_value = _readJsonFormat();
+                               break;
                        default:
                                return_value = NULL; /* keep the compiler silent */
                                elog(ERROR, "could not deserialize unrecognized node type: %d",
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index f33fd633120..5c9d473c5f6 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -2996,6 +2996,73 @@ _readWindowDef(void)
        READ_DONE();
 }
 
+static JsonConstructorExpr *
+_readJsonConstructorExpr(void)
+{
+       READ_LOCALS(JsonConstructorExpr);
+
+       READ_ENUM_FIELD(type, JsonConstructorType);
+       READ_NODE_FIELD(args);
+       READ_NODE_FIELD(func);
+       READ_NODE_FIELD(coercion);
+       READ_NODE_FIELD(returning);
+       READ_BOOL_FIELD(absent_on_null);
+       READ_BOOL_FIELD(unique);
+       READ_LOCATION_FIELD(location);
+
+       READ_DONE();
+}
+
+static JsonIsPredicate *
+_readJsonIsPredicate(void)
+{
+       READ_LOCALS(JsonIsPredicate);
+
+       READ_NODE_FIELD(expr);
+       READ_NODE_FIELD(format);
+       READ_ENUM_FIELD(item_type, JsonValueType);
+       READ_BOOL_FIELD(unique_keys);
+       READ_LOCATION_FIELD(location);
+
+       READ_DONE();
+}
+
+static JsonReturning *
+_readJsonReturning(void)
+{
+       READ_LOCALS(JsonReturning);
+
+       READ_NODE_FIELD(format);
+       READ_OID_FIELD(typid);
+       READ_INT_FIELD(typmod);
+
+       READ_DONE();
+}
+
+static JsonValueExpr *
+_readJsonValueExpr(void)
+{
+       READ_LOCALS(JsonValueExpr);
+
+       READ_NODE_FIELD(raw_expr);
+       READ_NODE_FIELD(formatted_expr);
+       READ_NODE_FIELD(format);
+
+       READ_DONE();
+}
+
+static JsonFormat *
+_readJsonFormat(void)
+{
+       READ_LOCALS(JsonFormat);
+
+       READ_ENUM_FIELD(format_type, JsonFormatType);
+       READ_ENUM_FIELD(encoding, JsonEncoding);
+       READ_LOCATION_FIELD(location);
+
+       READ_DONE();
+}
+
 #include "readfuncs_common.c"
 #ifndef COMPILING_BINARY_FUNCS
 /*
@@ -3525,6 +3592,16 @@ parseNodeString(void)
                return_value = _readPublicationTable();
        else if (MATCHX("WINDOWDEF"))
                return_value = _readWindowDef();
+       else if (MATCHX("JSONCONSTRUCTOREXPR"))
+               return_value = _readJsonConstructorExpr();
+       else if (MATCHX("JSONISPREDICATE"))
+               return_value = _readJsonIsPredicate();
+       else if (MATCHX("JSONRETURNING"))
+               return_value = _readJsonReturning();
+       else if (MATCHX("JSONVALUEEXPR"))
+               return_value = _readJsonValueExpr();
+       else if (MATCHX("JSONFORMAT"))
+               return_value = _readJsonFormat();
        else
        {
         ereport(ERROR,
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index bbf15891e35..46bc187644e 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2482,7 +2482,6 @@ select * from v_pagg_test order by y;
 9 |   19 | 4999 |        250 | 1019 | 999  |        250 |   19 | 4999 |        250 |    19 |  4999 |         250
 (10 rows)
 
--- end_ignore
 -- Ensure parallel aggregation is actually being used.
 explain (costs off) select * from v_pagg_test order by y;
                                                                 QUERY PLAN
@@ -2522,6 +2521,7 @@ select * from v_pagg_test order by y;
 9 |   19 | 4999 |        250 | 1019 | 999  |        250 |   19 | 4999 |        250 |    19 |  4999 |         250
 (10 rows)
 
+-- end_ignore
 -- Check that we don't fail on anonymous record types.
 set max_parallel_workers_per_gather = 2;
 explain (costs off)
diff --git a/src/test/regress/expected/brin_bloom.out b/src/test/regress/expected/brin_bloom.out
index 241847a2707..256ae17bf7c 100644
--- a/src/test/regress/expected/brin_bloom.out
+++ b/src/test/regress/expected/brin_bloom.out
@@ -411,19 +411,16 @@ CREATE INDEX brin_test_bloom_a_idx ON brin_test_bloom USING brin (a) WITH (pages
 CREATE INDEX brin_test_bloom_b_idx ON brin_test_bloom USING brin (b) WITH (pages_per_range = 2);
 VACUUM ANALYZE brin_test_bloom;
 -- Ensure brin index is used when columns are perfectly correlated
---start_ignore
---GPDB_14_MERGE_FIXME
---It should choose bitmap index scan, but seq scan here, which is caused by
---inaccurate index correlation calculation in compute_scalar_stats.
---end_ignore
 EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE a = 1;
-                QUERY PLAN                
-------------------------------------------
+                       QUERY PLAN                       
+--------------------------------------------------------
  Gather Motion 1:1  (slice1; segments: 1)
-   ->  Seq Scan on brin_test_bloom
-         Filter: (a = 1)
+   ->  Bitmap Heap Scan on brin_test_bloom
+         Recheck Cond: (a = 1)
+         ->  Bitmap Index Scan on brin_test_bloom_a_idx
+               Index Cond: (a = 1)
  Optimizer: Postgres query optimizer
-(4 rows)
+(6 rows)
 
 -- Ensure brin index is not used when values are not correlated
 EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE b = 1;
diff --git a/src/test/regress/expected/brin_multi.out b/src/test/regress/expected/brin_multi.out
index c92d59821c0..3122652b1a9 100644
--- a/src/test/regress/expected/brin_multi.out
+++ b/src/test/regress/expected/brin_multi.out
@@ -452,19 +452,16 @@ CREATE INDEX brin_test_multi_a_idx ON brin_test_multi USING brin (a) WITH (pages
 CREATE INDEX brin_test_multi_b_idx ON brin_test_multi USING brin (b) WITH (pages_per_range = 2);
 VACUUM ANALYZE brin_test_multi;
 -- Ensure brin index is used when columns are perfectly correlated
---start_ignore
---GPDB_14_MERGE_FIXME
---It should choose bitmap index scan, but seq scan here, which is caused by
---inaccurate index correlation calculation in compute_scalar_stats.
---end_ignore
 EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE a = 1;
-                QUERY PLAN                
-------------------------------------------
+                       QUERY PLAN                       
+--------------------------------------------------------
  Gather Motion 1:1  (slice1; segments: 1)
-   ->  Seq Scan on brin_test_multi
-         Filter: (a = 1)
+   ->  Bitmap Heap Scan on brin_test_multi
+         Recheck Cond: (a = 1)
+         ->  Bitmap Index Scan on brin_test_multi_a_idx
+               Index Cond: (a = 1)
  Optimizer: Postgres query optimizer
-(4 rows)
+(6 rows)
 
 -- Ensure brin index is not used when values are not correlated
 EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1;
@@ -500,13 +497,15 @@ SET enable_seqscan = off;
 -- make sure the ranges were built correctly and 2023-01-01 eliminates all
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date;
-                               QUERY PLAN                                
--------------------------------------------------------------------------
- Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '2023-01-01'::date)
-   ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '2023-01-01'::date)
-(4 rows)
+                                  QUERY PLAN                                   
+-------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '2023-01-01'::date)
+         ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '2023-01-01'::date)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 DROP TABLE brin_date_test;
 RESET enable_seqscan;
@@ -519,23 +518,27 @@ CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WI
 SET enable_seqscan = off;
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp;
-                                  QUERY PLAN                                  
-------------------------------------------------------------------------------
- Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
-   ->  Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
-(4 rows)
+                                     QUERY PLAN
+------------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
+         ->  Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp;
-                                  QUERY PLAN                                  
-------------------------------------------------------------------------------
- Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
-   ->  Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
-(4 rows)
+                                     QUERY PLAN
+------------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
+         ->  Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 DROP TABLE brin_timestamp_test;
 RESET enable_seqscan;
@@ -547,23 +550,27 @@ CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_
 SET enable_seqscan = off;
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date;
-                               QUERY PLAN                                
--------------------------------------------------------------------------
- Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '2023-01-01'::date)
-   ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '2023-01-01'::date)
-(4 rows)
+                                  QUERY PLAN                                   
+-------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '2023-01-01'::date)
+         ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '2023-01-01'::date)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date;
-                               QUERY PLAN                                
--------------------------------------------------------------------------
- Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '1900-01-01'::date)
-   ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '1900-01-01'::date)
-(4 rows)
+                                  QUERY PLAN                                   
+-------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '1900-01-01'::date)
+         ->  Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '1900-01-01'::date)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 DROP TABLE brin_date_test;
 RESET enable_seqscan;
@@ -576,23 +583,27 @@ CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH
 SET enable_seqscan = off;
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval;
-                                 QUERY PLAN                                  
------------------------------------------------------------------------------
- Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '@ 30 years ago'::interval)
-   ->  Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '@ 30 years ago'::interval)
-(4 rows)
+                                    QUERY PLAN
+-----------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '@ 30 years ago'::interval)
+         ->  Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '@ 30 years ago'::interval)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF)
 SELECT * FROM brin_interval_test WHERE a = '30 years'::interval;
-                                 QUERY PLAN                                  
------------------------------------------------------------------------------
- Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
-   Recheck Cond: (a = '@ 30 years'::interval)
-   ->  Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
-         Index Cond: (a = '@ 30 years'::interval)
-(4 rows)
+                                    QUERY PLAN
+-----------------------------------------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1) (actual rows=0 loops=1)
+   ->  Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1)
+         Recheck Cond: (a = '@ 30 years'::interval)
+         ->  Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1)
+               Index Cond: (a = '@ 30 years'::interval)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 DROP TABLE brin_interval_test;
 RESET enable_seqscan;
diff --git a/src/test/regress/expected/create_function_sql.out b/src/test/regress/expected/create_function_sql.out
index 758d7b9e504..5cae4ecbc1e 100644
--- a/src/test/regress/expected/create_function_sql.out
+++ b/src/test/regress/expected/create_function_sql.out
@@ -279,11 +279,7 @@ CREATE FUNCTION functest_S_13() RETURNS boolean
         SELECT 1;
         SELECT false;
     END;
-<<<<<<< HEAD:src/test/regress/expected/create_function_3.out
--- check display of function argments in sub-SELECT
-=======
 -- check display of function arguments in sub-SELECT
->>>>>>> REL_16_9:src/test/regress/expected/create_function_sql.out
 CREATE TABLE functest1 (i int);
 CREATE FUNCTION functest_S_16(a int, b int) RETURNS void
     LANGUAGE SQL
@@ -585,7 +581,6 @@ SELECT * FROM functest_sri1();
 (3 rows)
 
 EXPLAIN (verbose, costs off) SELECT * FROM functest_sri1();
-<<<<<<< HEAD:src/test/regress/expected/create_function_3.out
                  QUERY PLAN                 
 --------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
@@ -594,13 +589,6 @@ EXPLAIN (verbose, costs off) SELECT * FROM functest_sri1();
          Output: functest3.a
  Optimizer: Postgres query optimizer
 (5 rows)
-=======
-              QUERY PLAN              
---------------------------------------
- Seq Scan on temp_func_test.functest3
-   Output: functest3.a
-(2 rows)
->>>>>>> REL_16_9:src/test/regress/expected/create_function_sql.out
 
 CREATE FUNCTION functest_sri2() RETURNS SETOF int
 LANGUAGE SQL
@@ -617,7 +605,6 @@ SELECT * FROM functest_sri2();
 (3 rows)
 
 EXPLAIN (verbose, costs off) SELECT * FROM functest_sri2();
-<<<<<<< HEAD:src/test/regress/expected/create_function_3.out
                  QUERY PLAN                 
 --------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
@@ -626,13 +613,6 @@ EXPLAIN (verbose, costs off) SELECT * FROM functest_sri2();
          Output: functest3.a
  Optimizer: Postgres query optimizer
 (5 rows)
-=======
-              QUERY PLAN              
---------------------------------------
- Seq Scan on temp_func_test.functest3
-   Output: functest3.a
-(2 rows)
->>>>>>> REL_16_9:src/test/regress/expected/create_function_sql.out
 
 DROP TABLE functest3 CASCADE;
 NOTICE:  drop cascades to function functest_sri2()
@@ -734,7 +714,7 @@ CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
 ERROR:  only one AS item needed for language "sql"
 -- Cleanup
 DROP SCHEMA temp_func_test CASCADE;
-NOTICE:  drop cascades to 29 other objects
+NOTICE:  drop cascades to 30 other objects
 DETAIL:  drop cascades to function functest_a_1(text,date)
 drop cascades to function functest_a_2(text[])
 drop cascades to function functest_a_3()
diff --git a/src/test/regress/expected/create_schema.out b/src/test/regress/expected/create_schema.out
index 93302a07efc..324f357ae32 100644
--- a/src/test/regress/expected/create_schema.out
+++ b/src/test/regress/expected/create_schema.out
@@ -21,7 +21,7 @@ ERROR:  CREATE specifies a schema (schema_not_existing) different from the one b
 CREATE SCHEMA AUTHORIZATION regress_create_schema_role
   CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab
   EXECUTE FUNCTION schema_trig.no_func();
-ERROR:  CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role)
+ERROR:  Triggers for statements are not yet supported
 -- Again, with a role specification and no schema names.
 SET ROLE regress_create_schema_role;
 CREATE SCHEMA AUTHORIZATION CURRENT_ROLE
@@ -39,7 +39,7 @@ ERROR:  CREATE specifies a schema (schema_not_existing) different from the one b
 CREATE SCHEMA AUTHORIZATION CURRENT_ROLE
   CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab
   EXECUTE FUNCTION schema_trig.no_func();
-ERROR:  CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role)
+ERROR:  Triggers for statements are not yet supported
 -- Again, with a schema name and a role specification.
 CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE
   CREATE SEQUENCE schema_not_existing.seq;
@@ -56,7 +56,7 @@ ERROR:  CREATE specifies a schema (schema_not_existing) different from the one b
 CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE
   CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab
   EXECUTE FUNCTION schema_trig.no_func();
-ERROR:  CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1)
+ERROR:  Triggers for statements are not yet supported
 RESET ROLE;
 -- Cases where the schema creation succeeds.
 -- The schema created matches the role name.
diff --git a/src/test/regress/expected/gp_gin_index.out b/src/test/regress/expected/gp_gin_index.out
index 8dc071564be..de81b7a36d0 100644
--- a/src/test/regress/expected/gp_gin_index.out
+++ b/src/test/regress/expected/gp_gin_index.out
@@ -3,14 +3,15 @@ SET optimizer_enable_tablescan = off;
 SET enable_seqscan = off;
 set enable_bitmapscan = on;
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=600.04..600.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=500.01..600.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=500.01..600.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"wait": null}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..500.01 rows=1 width=0)
-                     Index Cond: (j @> '{"wait": null}'::jsonb)
+                                       QUERY PLAN
+----------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=49.72..49.73 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=49.66..49.71 rows=3 width=8)
+         ->  Partial Aggregate  (cost=49.66..49.67 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=13.02..49.22 rows=176 width=0)
+                     Recheck Cond: (j @> '{"wait": null}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..12.98 rows=176 width=0)
+                           Index Cond: (j @> '{"wait": null}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -41,50 +42,54 @@ EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'
 (8 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=600.04..600.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=500.01..600.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=500.01..600.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"age": 25}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..500.01 rows=1 width=0)
-                     Index Cond: (j @> '{"age": 25}'::jsonb)
+                                       QUERY PLAN
+----------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=49.72..49.73 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=49.66..49.71 rows=3 width=8)
+         ->  Partial Aggregate  (cost=49.66..49.67 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=13.02..49.22 rows=176 width=0)
+                     Recheck Cond: (j @> '{"age": 25}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..12.98 rows=176 width=0)
+                           Index Cond: (j @> '{"age": 25}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=600.04..600.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=500.01..600.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=500.01..600.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..500.01 rows=1 width=0)
-                     Index Cond: (j @> '{"age": 25.0}'::jsonb)
+                                       QUERY PLAN
+----------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=49.72..49.73 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=49.66..49.71 rows=3 width=8)
+         ->  Partial Aggregate  (cost=49.66..49.67 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=13.02..49.22 rows=176 width=0)
+                     Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..12.98 rows=176 width=0)
+                           Index Cond: (j @> '{"age": 25.0}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=600.04..600.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=500.01..600.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=500.01..600.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"array": ["foo"]}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..500.01 rows=1 width=0)
-                     Index Cond: (j @> '{"array": ["foo"]}'::jsonb)
+                                       QUERY PLAN
+----------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=49.72..49.73 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=49.66..49.71 rows=3 width=8)
+         ->  Partial Aggregate  (cost=49.66..49.67 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=13.02..49.22 rows=176 width=0)
+                     Recheck Cond: (j @> '{"array": ["foo"]}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..12.98 rows=176 width=0)
+                           Index Cond: (j @> '{"array": ["foo"]}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=600.04..600.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=500.01..600.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=500.01..600.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"array": ["bar"]}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..500.01 rows=1 width=0)
-                     Index Cond: (j @> '{"array": ["bar"]}'::jsonb)
+                                       QUERY PLAN
+----------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=49.72..49.73 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=49.66..49.71 rows=3 width=8)
+         ->  Partial Aggregate  (cost=49.66..49.67 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=13.02..49.22 rows=176 width=0)
+                     Recheck Cond: (j @> '{"array": ["bar"]}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..12.98 rows=176 width=0)
+                           Index Cond: (j @> '{"array": ["bar"]}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -158,14 +163,15 @@ EXPLAIN SELECT count(*) FROM testjsonb WHERE j ? 'public';
 (8 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j ? 'bar';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=400.04..400.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=300.01..400.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=300.01..400.02 rows=1 width=0)
-               Recheck Cond: (j ? 'bar'::text)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..300.01 rows=1 width=0)
-                     Index Cond: (j ? 'bar'::text)
+                                      QUERY PLAN
+---------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=45.29..45.30 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=45.24..45.29 rows=3 width=8)
+         ->  Partial Aggregate  (cost=45.24..45.25 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=8.60..44.80 rows=176 width=0)
+                     Recheck Cond: (j ? 'bar'::text)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..8.55 rows=176 width=0)
+                           Index Cond: (j ? 'bar'::text)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -294,14 +300,15 @@ DROP INDEX jidx_array;
 DROP INDEX jidx;
 CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=400.04..400.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=300.01..400.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=300.01..400.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"wait": null}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..300.01 rows=1 width=0)
-                     Index Cond: (j @> '{"wait": null}'::jsonb)
+                                      QUERY PLAN
+---------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=45.29..45.30 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=45.24..45.29 rows=3 width=8)
+         ->  Partial Aggregate  (cost=45.24..45.25 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=8.60..44.80 rows=176 width=0)
+                     Recheck Cond: (j @> '{"wait": null}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..8.55 rows=176 width=0)
+                           Index Cond: (j @> '{"wait": null}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -332,26 +339,28 @@ EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'
 (8 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=400.04..400.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=300.01..400.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=300.01..400.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"age": 25}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..300.01 rows=1 width=0)
-                     Index Cond: (j @> '{"age": 25}'::jsonb)
+                                      QUERY PLAN
+---------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=45.29..45.30 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=45.24..45.29 rows=3 width=8)
+         ->  Partial Aggregate  (cost=45.24..45.25 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=8.60..44.80 rows=176 width=0)
+                     Recheck Cond: (j @> '{"age": 25}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..8.55 rows=176 width=0)
+                           Index Cond: (j @> '{"age": 25}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
 EXPLAIN SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Aggregate  (cost=400.04..400.05 rows=1 width=8)
-   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=300.01..400.04 rows=2 width=0)
-         ->  Bitmap Heap Scan on testjsonb  (cost=300.01..400.02 rows=1 width=0)
-               Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
-               ->  Bitmap Index Scan on jidx  (cost=0.00..300.01 rows=1 width=0)
-                     Index Cond: (j @> '{"age": 25.0}'::jsonb)
+                                      QUERY PLAN
+---------------------------------------------------------------------------------------
+ Finalize Aggregate  (cost=45.29..45.30 rows=1 width=8)
+   ->  Gather Motion 3:1  (slice1; segments: 3)  (cost=45.24..45.29 rows=3 width=8)
+         ->  Partial Aggregate  (cost=45.24..45.25 rows=1 width=8)
+               ->  Bitmap Heap Scan on testjsonb  (cost=8.60..44.80 rows=176 width=0)
+                     Recheck Cond: (j @> '{"age": 25.0}'::jsonb)
+                     ->  Bitmap Index Scan on jidx  (cost=0.00..8.55 rows=176 width=0)
+                           Index Cond: (j @> '{"age": 25.0}'::jsonb)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -650,7 +659,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
 SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
  count 
 -------
-   494
+   496
 (1 row)
 
 -- For orca, ScalarArrayOpExpr condition on index scan not supported
@@ -669,7 +678,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
 SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
  count 
 -------
-   508
+  1022
 (1 row)
 
 DROP INDEX wowidx;
diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out
index 46a7f0fcfb8..dd5328211ef 100644
--- a/src/test/regress/expected/memoize.out
+++ b/src/test/regress/expected/memoize.out
@@ -1,10 +1,7 @@
 -- Perform tests on the Memoize node.
-<<<<<<< HEAD
 -- GPDB_14_MERGE_FIXME:
 -- 1.test memoize in CBDB as enable_nestloop is false by default
 -- 2.enable memoize in orca
-=======
->>>>>>> REL_16_9
 -- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
@@ -28,10 +25,7 @@ begin
         ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
         ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
         ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
-<<<<<<< HEAD
         ln := regexp_replace(ln, 'Memory: \d+', 'Memory: N');
-=======
->>>>>>> REL_16_9
        ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
         return next ln;
@@ -39,18 +33,14 @@ begin
 end;
 $$;
 -- Ensure we get a memoize node on the inner side of the nested loop
-<<<<<<< HEAD
 SET optimizer_enable_hashjoin TO off;
 SET optimizer_enable_bitmapscan TO off;
-=======
->>>>>>> REL_16_9
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
 SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;', false);
-<<<<<<< HEAD
                                             explain_memoize
-------------------------------------------------------------------------------------------------------
  Finalize Aggregate (actual rows=1 loops=N)
@@ -70,23 +60,6 @@ WHERE t2.unique1 < 1000;', false);
                                  Heap Fetches: N
  Optimizer: Postgres query optimizer
 (16 rows)
-=======
-                                      explain_memoize
--------------------------------------------------------------------------------------------
- Aggregate (actual rows=1 loops=N)
-   ->  Nested Loop (actual rows=1000 loops=N)
-         ->  Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
-               Filter: (unique1 < 1000)
-               Rows Removed by Filter: 9000
-         ->  Memoize (actual rows=1 loops=N)
-               Cache Key: t2.twenty
-               Cache Mode: logical
-               Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
-                     Index Cond: (unique1 = t2.twenty)
-                     Heap Fetches: N
-(12 rows)
->>>>>>> REL_16_9
 
 -- And check we get the expected results.
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
@@ -100,68 +73,40 @@ WHERE t2.unique1 < 1000;
 -- Try with LATERAL joins
 SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-<<<<<<< HEAD
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;', false);
-                                            explain_memoize
--------------------------------------------------------------------------------------------------------
- Finalize Aggregate (actual rows=1 loops=N)
-   ->  Gather Motion 3:1  (slice1; segments: 3) (actual rows=3 loops=N)
-         ->  Partial Aggregate (actual rows=1 loops=N)
-               ->  Nested Loop (actual rows=400 loops=N)
-                     ->  Redistribute Motion 3:3  (slice2; segments: 3) (actual rows=400 loops=N)
-                           Hash Key: t1.twenty
-                           ->  Seq Scan on tenk1 t1 (actual rows=340 loops=N)
-                                 Filter: (unique1 < 1000)
-                                 Rows Removed by Filter: 2906
-                     ->  Memoize (actual rows=1 loops=N)
-                           Cache Key: t1.twenty
-                           Cache Mode: logical
-                           ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
-                                 Index Cond: (unique1 = t1.twenty)
-                                 Heap Fetches: N
- Optimizer: Postgres query optimizer
-(16 rows)
-
--- And check we get the expected results.
-SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-=======
 LATERAL (SELECT t2.unique1 FROM tenk1 t2
          WHERE t1.twenty = t2.unique1 OFFSET 0) t2
 WHERE t1.unique1 < 1000;', false);
-                                      explain_memoize
--------------------------------------------------------------------------------------------
+                                             explain_memoize
+----------------------------------------------------------------------------------------------------------
  Aggregate (actual rows=1 loops=N)
    ->  Nested Loop (actual rows=1000 loops=N)
-         ->  Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
-               Filter: (unique1 < 1000)
-               Rows Removed by Filter: 9000
-         ->  Memoize (actual rows=1 loops=N)
-               Cache Key: t1.twenty
-               Cache Mode: binary
-               Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-               ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
-                     Index Cond: (unique1 = t1.twenty)
-                     Heap Fetches: N
-(12 rows)
+         ->  Gather Motion 3:1  (slice1; segments: 3) (actual rows=1000 loops=N)
+               ->  Seq Scan on tenk1 t1 (actual rows=340 loops=N)
+                     Filter: (unique1 < 1000)
+                     Rows Removed by Filter: 2906
+         ->  Materialize (actual rows=1 loops=N)
+               ->  Memoize (actual rows=1 loops=N)
+                     Cache Key: t1.twenty
+                     Cache Mode: binary
+                     Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+                     ->  Result (actual rows=1 loops=N)
+                           Filter: (t1.twenty = t2.unique1)
+                           ->  Materialize (actual rows=10000 loops=N)
+                                 ->  Gather Motion 3:1  (slice2; segments: 3) (actual rows=10000 loops=N)
+                                       ->  Seq Scan on tenk1 t2 (actual rows=3386 loops=N)
+ Optimizer: Postgres query optimizer
+(17 rows)
 
 -- And check we get the expected results.
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2
          WHERE t1.twenty = t2.unique1 OFFSET 0) t2
->>>>>>> REL_16_9
 WHERE t1.unique1 < 1000;
  count |        avg         
 -------+--------------------
   1000 | 9.5000000000000000
 (1 row)
 
-<<<<<<< HEAD
--- Reduce work_mem so that we see some cache evictions
-SET work_mem TO '64kB';
-SET enable_mergejoin TO off;
-=======
 SET enable_mergejoin TO off;
 -- Test for varlena datatype with expr evaluation
 CREATE TABLE expr_key (x numeric, t text);
@@ -177,25 +122,29 @@ VACUUM ANALYZE expr_key;
 SELECT explain_memoize('
 SELECT * FROM expr_key t1 INNER JOIN expr_key t2
 ON t1.x = t2.t::numeric AND t1.t::numeric = t2.x;', false);
-                                      explain_memoize
--------------------------------------------------------------------------------------------
- Nested Loop (actual rows=80 loops=N)
-   ->  Seq Scan on expr_key t1 (actual rows=40 loops=N)
-   ->  Memoize (actual rows=2 loops=N)
-         Cache Key: t1.x, (t1.t)::numeric
-         Cache Mode: logical
-         Hits: 20  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-         ->  Index Only Scan using expr_key_idx_x_t on expr_key t2 (actual rows=2 loops=N)
-               Index Cond: (x = (t1.t)::numeric)
-               Filter: (t1.x = (t)::numeric)
-               Heap Fetches: N
-(10 rows)
+                                         explain_memoize
+-------------------------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3) (actual rows=80 loops=N)
+   ->  Merge Join (actual rows=28 loops=N)
+         Merge Cond: ((t1.x = ((t2.t)::numeric)) AND (((t1.t)::numeric) = t2.x))
+         ->  Sort (actual rows=14 loops=N)
+               Sort Key: t1.x, ((t1.t)::numeric)
+               Sort Method:  quicksort  Memory: NkB
+               ->  Seq Scan on expr_key t1 (actual rows=14 loops=N)
+         ->  Sort (actual rows=27 loops=N)
+               Sort Key: ((t2.t)::numeric), t2.x
+               Sort Method:  quicksort  Memory: NkB
+               ->  Result (actual rows=14 loops=N)
+                     ->  Redistribute Motion 3:3  (slice2; segments: 3) (actual rows=14 loops=N)
+                           Hash Key: (t2.t)::numeric
+                           ->  Seq Scan on expr_key t2 (actual rows=14 loops=N)
+ Optimizer: Postgres query optimizer
+(15 rows)
 
 DROP TABLE expr_key;
 -- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
 SET work_mem TO '64kB';
 SET hash_mem_multiplier TO 1.0;
->>>>>>> REL_16_9
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
@@ -203,7 +152,6 @@ SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
 WHERE t2.unique1 < 1200;', true);
-<<<<<<< HEAD
                                            explain_memoize
--------------------------------------------------------------------------------------------------------
  Finalize Aggregate (actual rows=1 loops=N)
@@ -223,23 +171,6 @@ WHERE t2.unique1 < 1200;', true);
                                        Rows Removed by Filter: 2961
  Optimizer: Postgres query optimizer
 (16 rows)
-=======
-                                      explain_memoize
--------------------------------------------------------------------------------------------
- Aggregate (actual rows=1 loops=N)
-   ->  Nested Loop (actual rows=1200 loops=N)
-         ->  Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
-               Filter: (unique1 < 1200)
-               Rows Removed by Filter: 8800
-         ->  Memoize (actual rows=1 loops=N)
-               Cache Key: t2.thousand
-               Cache Mode: logical
-               Hits: N  Misses: N  Evictions: N  Overflows: 0  Memory Usage: NkB
-               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
-                     Index Cond: (unique1 = t2.thousand)
-                     Heap Fetches: N
-(12 rows)
->>>>>>> REL_16_9
 
 CREATE TABLE flt (f float);
 CREATE INDEX flt_f_idx ON flt (f);
@@ -251,7 +182,6 @@ SELECT explain_memoize('
 SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f = f2.f;', false);
                                 explain_memoize                                
 -------------------------------------------------------------------------------
-<<<<<<< HEAD
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=4 loops=N)
    ->  Nested Loop (actual rows=4 loops=N)
          ->  Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N)
@@ -261,24 +191,10 @@ SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f = f2.f;', false);
                Heap Fetches: N
  Optimizer: Postgres query optimizer
 (8 rows)
-=======
- Nested Loop (actual rows=4 loops=N)
-   ->  Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N)
-         Heap Fetches: N
-   ->  Memoize (actual rows=2 loops=N)
-         Cache Key: f1.f
-         Cache Mode: logical
-         Hits: 1  Misses: 1  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-         ->  Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N)
-               Index Cond: (f = f1.f)
-               Heap Fetches: N
-(10 rows)
->>>>>>> REL_16_9
 
 -- Ensure memoize operates in binary mode
 SELECT explain_memoize('
 SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f >= f2.f;', false);
-<<<<<<< HEAD
                                   explain_memoize
-------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=4 loops=N)
@@ -291,21 +207,6 @@ SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f >= f2.f;', false);
                Heap Fetches: N
  Optimizer: Postgres query optimizer
 (9 rows)
-=======
-                                explain_memoize                                
--------------------------------------------------------------------------------
- Nested Loop (actual rows=4 loops=N)
-   ->  Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N)
-         Heap Fetches: N
-   ->  Memoize (actual rows=2 loops=N)
-         Cache Key: f1.f
-         Cache Mode: binary
-         Hits: 0  Misses: 2  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-         ->  Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N)
-               Index Cond: (f <= f1.f)
-               Heap Fetches: N
-(10 rows)
->>>>>>> REL_16_9
 
 DROP TABLE flt;
 -- Exercise Memoize in binary mode with a large fixed width type and a
@@ -313,11 +214,7 @@ DROP TABLE flt;
 CREATE TABLE strtest (n name, t text);
 CREATE INDEX strtest_n_idx ON strtest (n);
 CREATE INDEX strtest_t_idx ON strtest (t);
-<<<<<<< HEAD
-INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(md5('three'),100));
-=======
 INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(fipshash('three'),100));
->>>>>>> REL_16_9
 -- duplicate rows so we get some cache hits
 INSERT INTO strtest SELECT * FROM strtest;
 ANALYZE strtest;
@@ -326,7 +223,6 @@ SELECT explain_memoize('
 SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.n >= s2.n;', false);
                                 explain_memoize
 
----------------------------------------------------------------------------------
-<<<<<<< HEAD
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=24 loops=N)
    ->  Nested Loop (actual rows=12 loops=N)
          ->  Broadcast Motion 3:3  (slice2; segments: 3) (actual rows=6 loops=N)
@@ -335,24 +231,12 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.n >= s2.n;', false);
                Index Cond: (n <= s1.n)
  Optimizer: Postgres query optimizer
 (7 rows)
-=======
- Nested Loop (actual rows=24 loops=N)
-   ->  Seq Scan on strtest s1 (actual rows=6 loops=N)
-   ->  Memoize (actual rows=4 loops=N)
-         Cache Key: s1.n
-         Cache Mode: binary
-         Hits: 3  Misses: 3  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-         ->  Index Scan using strtest_n_idx on strtest s2 (actual rows=4 loops=N)
-               Index Cond: (n <= s1.n)
-(8 rows)
->>>>>>> REL_16_9
 
 -- Ensure we get 3 hits and 3 misses
 SELECT explain_memoize('
 SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false);
                                 explain_memoize
 
----------------------------------------------------------------------------------
-<<<<<<< HEAD
  Gather Motion 3:1  (slice1; segments: 3) (actual rows=24 loops=N)
    ->  Nested Loop (actual rows=16 loops=N)
          ->  Broadcast Motion 3:3  (slice2; segments: 3) (actual rows=6 loops=N)
@@ -362,18 +246,6 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false);
  Optimizer: Postgres query optimizer
 (7 rows)
 
-DROP TABLE strtest;
-=======
- Nested Loop (actual rows=24 loops=N)
-   ->  Seq Scan on strtest s1 (actual rows=6 loops=N)
-   ->  Memoize (actual rows=4 loops=N)
-         Cache Key: s1.t
-         Cache Mode: binary
-         Hits: 3  Misses: 3  Evictions: Zero  Overflows: 0  Memory Usage: NkB
-         ->  Index Scan using strtest_t_idx on strtest s2 (actual rows=4 loops=N)
-               Index Cond: (t <= s1.t)
-(8 rows)
-
 DROP TABLE strtest;
 -- Ensure memoize works with partitionwise join
 SET enable_partitionwise_join TO on;
@@ -389,28 +261,22 @@ SELECT explain_memoize('
 SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false);
                                     explain_memoize
 
------------------------------------------------------------------------------------------
- Append (actual rows=32 loops=N)
-   ->  Nested Loop (actual rows=16 loops=N)
-         ->  Index Only Scan using iprt_p1_a on prt_p1 t1_1 (actual rows=4 loops=N)
-               Heap Fetches: N
-         ->  Memoize (actual rows=4 loops=N)
-               Cache Key: t1_1.a
-               Cache Mode: logical
-               Hits: 3  Misses: 1  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+ Gather Motion 3:1  (slice1; segments: 3) (actual rows=32 loops=N)
+   ->  Append (actual rows=16 loops=N)
+         ->  Nested Loop (actual rows=16 loops=N)
+               ->  Index Only Scan using iprt_p1_a on prt_p1 t1_1 (actual rows=4 loops=N)
+                     Heap Fetches: N
                ->  Index Only Scan using iprt_p1_a on prt_p1 t2_1 (actual rows=4 loops=N)
                      Index Cond: (a = t1_1.a)
                      Heap Fetches: N
-   ->  Nested Loop (actual rows=16 loops=N)
-         ->  Index Only Scan using iprt_p2_a on prt_p2 t1_2 (actual rows=4 loops=N)
-               Heap Fetches: N
-         ->  Memoize (actual rows=4 loops=N)
-               Cache Key: t1_2.a
-               Cache Mode: logical
-               Hits: 3  Misses: 1  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+         ->  Nested Loop (actual rows=16 loops=N)
+               ->  Index Only Scan using iprt_p2_a on prt_p2 t1_2 (actual rows=4 loops=N)
+                     Heap Fetches: N
                ->  Index Only Scan using iprt_p2_a on prt_p2 t2_2 (actual rows=4 loops=N)
                      Index Cond: (a = t1_2.a)
                      Heap Fetches: N
-(21 rows)
+ Optimizer: Postgres query optimizer
+(15 rows)
 
 -- Ensure memoize works with parameterized union-all Append path
 SET enable_partitionwise_join TO off;
@@ -420,13 +286,10 @@ SELECT * FROM prt_p1 t1 INNER JOIN
 ON t1.a = t2.a;', false);
                                   explain_memoize
 
-------------------------------------------------------------------------------------
- Nested Loop (actual rows=16 loops=N)
-   ->  Index Only Scan using iprt_p1_a on prt_p1 t1 (actual rows=4 loops=N)
-         Heap Fetches: N
-   ->  Memoize (actual rows=4 loops=N)
-         Cache Key: t1.a
-         Cache Mode: logical
-         Hits: 3  Misses: 1  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+ Gather Motion 3:1  (slice1; segments: 3) (actual rows=16 loops=N)
+   ->  Nested Loop (actual rows=16 loops=N)
+         ->  Index Only Scan using iprt_p1_a on prt_p1 t1 (actual rows=4 loops=N)
+               Heap Fetches: N
          ->  Append (actual rows=4 loops=N)
               ->  Index Only Scan using iprt_p1_a on prt_p1 (actual rows=4 loops=N)
                      Index Cond: (a = t1.a)
@@ -434,11 +297,11 @@ ON t1.a = t2.a;', false);
               ->  Index Only Scan using iprt_p2_a on prt_p2 (actual rows=0 loops=N)
                      Index Cond: (a = t1.a)
                      Heap Fetches: N
-(14 rows)
+ Optimizer: Postgres query optimizer
+(12 rows)
 
 DROP TABLE prt;
 RESET enable_partitionwise_join;
->>>>>>> REL_16_9
 -- Exercise Memoize code that flushes the cache when a parameter changes which
 -- is not part of the cache key.
 -- Ensure we get a Memoize plan
@@ -449,7 +312,6 @@ WHERE unique1 < 3
        SELECT 1 FROM tenk1 t1
        INNER JOIN tenk1 t2 ON t1.unique1 = t2.hundred
        WHERE t0.ten = t1.twenty AND t0.two <> t2.four OFFSET 0);
-<<<<<<< HEAD
                                          QUERY PLAN
 
----------------------------------------------------------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
@@ -476,24 +338,6 @@ WHERE unique1 < 3
                                                ->  Index Scan using tenk1_unique1 on tenk1 t1
  Optimizer: Postgres query optimizer
 (23 rows)
-=======
-                           QUERY PLAN                           
-----------------------------------------------------------------
- Index Scan using tenk1_unique1 on tenk1 t0
-   Index Cond: (unique1 < 3)
-   Filter: (SubPlan 1)
-   SubPlan 1
-     ->  Nested Loop
-           ->  Index Scan using tenk1_hundred on tenk1 t2
-                 Filter: (t0.two <> four)
-           ->  Memoize
-                 Cache Key: t2.hundred
-                 Cache Mode: logical
-                 ->  Index Scan using tenk1_unique1 on tenk1 t1
-                       Index Cond: (unique1 = t2.hundred)
-                       Filter: (t0.ten = twenty)
-(13 rows)
->>>>>>> REL_16_9
 
 -- Ensure the above query returns the correct result
 SELECT unique1 FROM tenk1 t0
@@ -510,16 +354,11 @@ WHERE unique1 < 3
 RESET enable_seqscan;
 RESET enable_mergejoin;
 RESET work_mem;
-<<<<<<< HEAD
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
 RESET optimizer_enable_hashjoin;
 RESET optimizer_enable_bitmapscan;
-=======
 RESET hash_mem_multiplier;
-RESET enable_bitmapscan;
-RESET enable_hashjoin;
->>>>>>> REL_16_9
 -- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
 SET parallel_setup_cost TO 0;
@@ -530,7 +369,6 @@ EXPLAIN (COSTS OFF)
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;
-<<<<<<< HEAD
                                   QUERY PLAN                                  
 ------------------------------------------------------------------------------
  Finalize Aggregate
@@ -547,24 +385,6 @@ WHERE t1.unique1 < 1000;
                                        ->  Bitmap Index Scan on tenk1_unique1
                                              Index Cond: (unique1 < 1000)
  Optimizer: Postgres query optimizer
-=======
-                                  QUERY PLAN                                   
--------------------------------------------------------------------------------
- Finalize Aggregate
-   ->  Gather
-         Workers Planned: 2
-         ->  Partial Aggregate
-               ->  Nested Loop
-                     ->  Parallel Bitmap Heap Scan on tenk1 t1
-                           Recheck Cond: (unique1 < 1000)
-                           ->  Bitmap Index Scan on tenk1_unique1
-                                 Index Cond: (unique1 < 1000)
-                     ->  Memoize
-                           Cache Key: t1.twenty
-                           Cache Mode: logical
-                           ->  Index Only Scan using tenk1_unique1 on tenk1 t2
-                                 Index Cond: (unique1 = t1.twenty)
->>>>>>> REL_16_9
 (14 rows)
 
 -- And ensure the parallel plan gives us the correct results.
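
Note: the eviction case covered by the memoize.out hunks above can be
reproduced interactively with the same query and GUC settings that memoize.sql
sets; a minimal sketch (tenk1 is the standard regression table, and the
settings mirror the ones shown in the hunks):

SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
SET work_mem TO '64kB';              -- shrink the Memoize cache
SET hash_mem_multiplier TO 1.0;
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT COUNT(*), AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;             -- the Memoize node should report Evictions > 0
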
diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out
index a69f0486b24..4625d100bf0 100644
--- a/src/test/regress/expected/sanity_check.out
+++ b/src/test/regress/expected/sanity_check.out
@@ -212,7 +212,6 @@ tableam_parted_d_heap2|f
 tableam_parted_heap2|f
 tableam_tbl_heap2|f
 tableam_tblas_heap2|f
-tablespace_table1|f
 tbl_include_box|t
 tbl_include_box_pk|f
 tbl_include_pk|t
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 022bafa43db..e2117df4bd3 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -15,20 +15,19 @@ set max_parallel_workers_per_gather=4;
 -- Parallel Append with partial-subplans
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                     QUERY PLAN                      
------------------------------------------------------
- Finalize Aggregate
+                   QUERY PLAN                   
+------------------------------------------------
+ Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Seq Scan on d_star a_star_4
-                     ->  Seq Scan on f_star a_star_6
-                     ->  Seq Scan on e_star a_star_5
-                     ->  Seq Scan on b_star a_star_2
-                     ->  Seq Scan on c_star a_star_3
-                     ->  Seq Scan on a_star a_star_1
+         ->  Parallel Append
+               ->  Seq Scan on d_star a_star_4
+               ->  Seq Scan on f_star a_star_6
+               ->  Seq Scan on e_star a_star_5
+               ->  Seq Scan on b_star a_star_2
+               ->  Seq Scan on c_star a_star_3
+               ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(11 rows)
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a1;
  round | sum 
@@ -41,20 +40,19 @@ alter table c_star set (parallel_workers = 0);
 alter table d_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                     QUERY PLAN                      
------------------------------------------------------
- Finalize Aggregate
+                   QUERY PLAN                   
+------------------------------------------------
+ Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Seq Scan on d_star a_star_4
-                     ->  Seq Scan on f_star a_star_6
-                     ->  Seq Scan on e_star a_star_5
-                     ->  Seq Scan on b_star a_star_2
-                     ->  Seq Scan on c_star a_star_3
-                     ->  Seq Scan on a_star a_star_1
+         ->  Parallel Append
+               ->  Seq Scan on d_star a_star_4
+               ->  Seq Scan on f_star a_star_6
+               ->  Seq Scan on e_star a_star_5
+               ->  Seq Scan on b_star a_star_2
+               ->  Seq Scan on c_star a_star_3
+               ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(11 rows)
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a2;
  round | sum 
@@ -69,20 +67,19 @@ alter table e_star set (parallel_workers = 0);
 alter table f_star set (parallel_workers = 0);
 explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
-                     QUERY PLAN                      
------------------------------------------------------
- Finalize Aggregate
+                   QUERY PLAN                   
+------------------------------------------------
+ Aggregate
    ->  Gather Motion 9:1  (slice1; segments: 9)
-         ->  Partial Aggregate
-               ->  Parallel Append
-                     ->  Seq Scan on d_star a_star_4
-                     ->  Seq Scan on f_star a_star_6
-                     ->  Seq Scan on e_star a_star_5
-                     ->  Seq Scan on b_star a_star_2
-                     ->  Seq Scan on c_star a_star_3
-                     ->  Seq Scan on a_star a_star_1
+         ->  Parallel Append
+               ->  Seq Scan on d_star a_star_4
+               ->  Seq Scan on f_star a_star_6
+               ->  Seq Scan on e_star a_star_5
+               ->  Seq Scan on b_star a_star_2
+               ->  Seq Scan on c_star a_star_3
+               ->  Seq Scan on a_star a_star_1
  Optimizer: Postgres query optimizer
-(11 rows)
+(10 rows)
 
 select round(avg(aa)), sum(aa) from a_star a3;
  round | sum 
@@ -170,15 +167,14 @@ drop table part_pa_test;
 set parallel_leader_participation = off;
 explain (costs off)
   select count(*) from tenk1 where stringu1 = 'GRAAAA';
-                       QUERY PLAN                        
----------------------------------------------------------
- Finalize Aggregate
+                    QUERY PLAN                     
+---------------------------------------------------
+ Aggregate
    ->  Gather Motion 12:1  (slice1; segments: 12)
-         ->  Partial Aggregate
-               ->  Parallel Seq Scan on tenk1
-                     Filter: (stringu1 = 'GRAAAA'::name)
+         ->  Parallel Seq Scan on tenk1
+               Filter: (stringu1 = 'GRAAAA'::name)
  Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
 
 select count(*) from tenk1 where stringu1 = 'GRAAAA';
  count 
@@ -191,15 +187,14 @@ select count(*) from tenk1 where stringu1 = 'GRAAAA';
 set max_parallel_workers = 0;
 explain (costs off)
   select count(*) from tenk1 where stringu1 = 'GRAAAA';
-                       QUERY PLAN                        
----------------------------------------------------------
- Finalize Aggregate
+                    QUERY PLAN                     
+---------------------------------------------------
+ Aggregate
    ->  Gather Motion 12:1  (slice1; segments: 12)
-         ->  Partial Aggregate
-               ->  Parallel Seq Scan on tenk1
-                     Filter: (stringu1 = 'GRAAAA'::name)
+         ->  Parallel Seq Scan on tenk1
+               Filter: (stringu1 = 'GRAAAA'::name)
  Optimizer: Postgres query optimizer
-(6 rows)
+(5 rows)
 
 select count(*) from tenk1 where stringu1 = 'GRAAAA';
  count 
@@ -859,25 +854,22 @@ select * from
   (select string4, count(unique2)
    from tenk1 group by string4 order by string4) ss
   right join (values (1),(2),(3)) v(x) on true;
-                                  QUERY PLAN                                  
-------------------------------------------------------------------------------
+                         QUERY PLAN                         
+------------------------------------------------------------
  Nested Loop Left Join
    ->  Values Scan on "*VALUES*"
    ->  Materialize
-         ->  Gather Motion 3:1  (slice1; segments: 3)
-               ->  Finalize GroupAggregate
-                     Group Key: tenk1.string4
-                     ->  Sort
-                           Sort Key: tenk1.string4
-                           ->  Redistribute Motion 3:3  (slice2; segments: 3)
-                                 Hash Key: tenk1.string4
-                                 ->  Partial GroupAggregate
-                                       Group Key: tenk1.string4
-                                       ->  Sort
-                                             Sort Key: tenk1.string4
-                                             ->  Seq Scan on tenk1
+         ->  Finalize GroupAggregate
+               Group Key: tenk1.string4
+               ->  Gather Motion 3:1  (slice1; segments: 3)
+                     Merge Key: tenk1.string4
+                     ->  Partial GroupAggregate
+                           Group Key: tenk1.string4
+                           ->  Sort
+                                 Sort Key: tenk1.string4
+                                 ->  Seq Scan on tenk1
  Optimizer: Postgres query optimizer
-(16 rows)
+(13 rows)
 
 select * from
   (select string4, count(unique2)
@@ -1110,8 +1102,7 @@ explain (costs off)
 
 -- to increase the parallel query test coverage
 SAVEPOINT settings;
-<<<<<<< HEAD
-SET LOCAL force_parallel_mode = 1;
+SET LOCAL debug_parallel_query = 1;
 -- CBDB_PARALLEL_FIXME: analyze actual rows may be different by running multiple times.
 EXPLAIN (timing off, summary off, costs off) SELECT * FROM tenk1;
                 QUERY PLAN                
@@ -1120,17 +1111,6 @@ EXPLAIN (timing off, summary off, costs off) SELECT * FROM tenk1;
    ->  Parallel Seq Scan on tenk1
  Optimizer: Postgres query optimizer
 (3 rows)
-=======
-SET LOCAL debug_parallel_query = 1;
-EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1;
-                         QUERY PLAN                          
--------------------------------------------------------------
- Gather (actual rows=10000 loops=1)
-   Workers Planned: 4
-   Workers Launched: 4
-   ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
-(4 rows)
->>>>>>> REL_16_9
 
 ROLLBACK TO SAVEPOINT settings;
 -- provoke error in worker
@@ -1302,10 +1282,6 @@ SELECT 1 FROM tenk1_vw_sec
 (12 rows)
 
 rollback;
-<<<<<<< HEAD
-reset enable_parallel;
-reset optimizer;
-=======
 -- test that function option SET ROLE works in parallel workers.
 create role regress_parallel_worker;
 create function set_and_report_role() returns text as
@@ -1334,7 +1310,6 @@ select set_and_report_role();
 select set_role_and_error(0);
 ERROR:  division by zero
 CONTEXT:  SQL function "set_role_and_error" statement 1
-parallel worker
 reset debug_parallel_query;
 drop function set_and_report_role();
 drop function set_role_and_error(int);
@@ -1363,4 +1338,5 @@ CREATE UNIQUE INDEX parallel_hang_idx
 SET debug_parallel_query = on;
 DELETE FROM parallel_hang WHERE 380 <= i AND i <= 420;
 ROLLBACK;
->>>>>>> REL_16_9
+reset enable_parallel;
+reset optimizer;
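
Note: the SET ROLE coverage merged above depends on a function-level SET
clause being honored inside parallel workers. A minimal sketch of the shape
of set_and_report_role(); the hunk elides the function body, so the body
below is an assumption:

create role regress_parallel_worker;
create function set_and_report_role() returns text as
  $$ select current_setting('role') $$   -- assumed body; reports the active role
  language sql parallel safe
  set role = regress_parallel_worker;
set debug_parallel_query = 1;
select set_and_report_role();  -- expected to run in a worker and still
                               -- report regress_parallel_worker
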
diff --git a/src/test/regress/expected/sqljson.out b/src/test/regress/expected/sqljson.out
index 7a5a6325229..ba1f4b0e24a 100644
--- a/src/test/regress/expected/sqljson.out
+++ b/src/test/regress/expected/sqljson.out
@@ -642,7 +642,8 @@ SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json);
 ------------------------------------------------------------------------------
  Result
    Output: JSON_OBJECT('foo' : '1'::json, 'bar' : 'baz'::text RETURNING json)
-(2 rows)
+ Optimizer: Postgres query optimizer
+(3 rows)
 
 CREATE VIEW json_object_view AS
 SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json);
@@ -657,7 +658,8 @@ SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json);
 ---------------------------------------------------
  Result
    Output: JSON_ARRAY('1'::json, 2 RETURNING json)
-(2 rows)
+ Optimizer: Postgres query optimizer
+(3 rows)
 
 CREATE VIEW json_array_view AS
 SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json);
@@ -685,6 +687,7 @@ FROM generate_series(1,5) i;
 
-----------------------------------------------------------------------------------------------------------------------------------
  WindowAgg
   Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) OVER (?), ((i % 2))
+   Partition By: ((i.i % 2))
    ->  Sort
          Output: ((i % 2)), i
          Sort Key: ((i.i % 2))
@@ -721,6 +724,7 @@ FROM generate_series(1,5) i;
 
--------------------------------------------------------------------------------------------------------------------------
  WindowAgg
   Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (?), ((i % 2))
+   Partition By: ((i.i % 2))
    ->  Sort
          Output: ((i % 2)), i
          Sort Key: ((i.i % 2))
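
Note: the two sqljson.out hunks above add a "Partition By:" line to the
verbose plans of window aggregates. A sketch of the first query's shape,
reconstructed from the visible Output: expression; treat the literal query
text as an assumption, not the exact test source:

EXPLAIN (VERBOSE, COSTS OFF)
SELECT JSON_OBJECTAGG(i : ('111' || i)::bytea FORMAT JSON WITH UNIQUE KEYS
                      RETURNING text) OVER (PARTITION BY i % 2)
FROM generate_series(1, 5) i;
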
diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out
index a85532132ce..04ea7de5d16 100644
--- a/src/test/regress/expected/stats.out
+++ b/src/test/regress/expected/stats.out
@@ -31,68 +31,7 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
   FROM pg_catalog.pg_stat_user_tables AS t,
        pg_catalog.pg_statio_user_tables AS b
  WHERE t.relname='tenk2' AND b.relname='tenk2';
-<<<<<<< HEAD
--- function to wait for counters to advance
-create function wait_for_stats() returns void as $$
-declare
-  start_time timestamptz := clock_timestamp();
-  updated1 bool;
-  updated2 bool;
-  updated3 bool;
-  updated4 bool;
-  updated5 bool;
-begin
-  -- we don't want to wait forever; loop will exit after 30 seconds
-  for i in 1 .. 300 loop
-
-    -- With parallel query, the seqscan and indexscan on tenk2 might be done
-    -- in parallel worker processes, which will send their stats counters
-    -- asynchronously to what our own session does.  So we must check for
-    -- those counts to be registered separately from the update counts.
-
-    -- check to see if seqscan has been sensed
-    SELECT (st.seq_scan >= pr.seq_scan + 1) INTO updated1
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    -- check to see if indexscan has been sensed
-    SELECT (st.idx_scan >= pr.idx_scan + 1) INTO updated2
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    -- check to see if all updates have been sensed
-    SELECT (n_tup_ins > 0) INTO updated3
-      FROM pg_stat_user_tables WHERE relname='trunc_stats_test4';
-
-    -- We must also check explicitly that pg_stat_get_snapshot_timestamp has
-    -- advanced, because that comes from the global stats file which might
-    -- be older than the per-DB stats file we got the other values from.
-    SELECT (pr.snap_ts < pg_stat_get_snapshot_timestamp()) INTO updated4
-      FROM prevstats AS pr;
-
-    -- check to see if idx_tup_fetch has been sensed
-    SELECT (st.idx_tup_fetch >= pr.idx_tup_fetch + 1) INTO updated5
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    exit when updated1 and updated2 and updated3 and updated4 and updated5;
-
-    -- wait a little
-    perform pg_sleep_for('100 milliseconds');
-
-    -- reset stats snapshot so we can test again
-    perform pg_stat_clear_snapshot();
-
-  end loop;
-
-  -- report time waited in postmaster log (where it won't change test output)
-  raise log 'wait_for_stats delayed % seconds',
-    extract(epoch from clock_timestamp() - start_time);
-end
-$$ language plpgsql;
-=======
 COMMIT;
->>>>>>> REL_16_9
 -- test effects of TRUNCATE on n_live_tup/n_dead_tup counters
 CREATE TABLE trunc_stats_test(id serial);
 CREATE TABLE trunc_stats_test1(id serial, stuff text);
@@ -629,8 +568,6 @@ SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
 
 DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
 DROP TABLE prevstats;
-<<<<<<< HEAD
-=======
 -----
 -- Test that last_seq_scan, last_idx_scan are correctly maintained
 --
@@ -673,12 +610,15 @@ SET LOCAL enable_seqscan TO on;
 SET LOCAL enable_indexscan TO on;
 SET LOCAL enable_bitmapscan TO off;
 EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
-            QUERY PLAN            
-----------------------------------
- Aggregate
-   ->  Seq Scan on test_last_scan
-         Filter: (noidx_col = 1)
-(3 rows)
+                   QUERY PLAN                   
+------------------------------------------------
+ Finalize Aggregate
+   ->  Gather Motion 3:1  (slice1; segments: 3)
+         ->  Partial Aggregate
+               ->  Seq Scan on test_last_scan
+                     Filter: (noidx_col = 1)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
  count 
@@ -688,12 +628,14 @@ SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
 
 SET LOCAL enable_seqscan TO off;
 EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
-                          QUERY PLAN                          
---------------------------------------------------------------
+                             QUERY PLAN                             
+--------------------------------------------------------------------
  Aggregate
-   ->  Index Scan using test_last_scan_pkey on test_last_scan
-         Index Cond: (idx_col = 1)
-(3 rows)
+   ->  Gather Motion 1:1  (slice1; segments: 1)
+         ->  Index Scan using test_last_scan_pkey on test_last_scan
+               Index Cond: (idx_col = 1)
+ Optimizer: Postgres query optimizer
+(5 rows)
 
 SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
  count 
@@ -723,12 +665,15 @@ SET LOCAL enable_seqscan TO on;
 SET LOCAL enable_indexscan TO off;
 SET LOCAL enable_bitmapscan TO off;
 EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
-            QUERY PLAN            
-----------------------------------
- Aggregate
-   ->  Seq Scan on test_last_scan
-         Filter: (noidx_col = 1)
-(3 rows)
+                   QUERY PLAN                   
+------------------------------------------------
+ Finalize Aggregate
+   ->  Gather Motion 3:1  (slice1; segments: 3)
+         ->  Partial Aggregate
+               ->  Seq Scan on test_last_scan
+                     Filter: (noidx_col = 1)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
  count 
@@ -766,12 +711,14 @@ SET LOCAL enable_seqscan TO off;
 SET LOCAL enable_indexscan TO on;
 SET LOCAL enable_bitmapscan TO off;
 EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
-                          QUERY PLAN                          
---------------------------------------------------------------
+                             QUERY PLAN                             
+--------------------------------------------------------------------
  Aggregate
-   ->  Index Scan using test_last_scan_pkey on test_last_scan
-         Index Cond: (idx_col = 1)
-(3 rows)
+   ->  Gather Motion 1:1  (slice1; segments: 1)
+         ->  Index Scan using test_last_scan_pkey on test_last_scan
+               Index Cond: (idx_col = 1)
+ Optimizer: Postgres query optimizer
+(5 rows)
 
 SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
  count 
@@ -809,14 +756,16 @@ SET LOCAL enable_seqscan TO off;
 SET LOCAL enable_indexscan TO off;
 SET LOCAL enable_bitmapscan TO on;
 EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
-                      QUERY PLAN                      
-------------------------------------------------------
+                         QUERY PLAN                         
+------------------------------------------------------------
  Aggregate
-   ->  Bitmap Heap Scan on test_last_scan
-         Recheck Cond: (idx_col = 1)
-         ->  Bitmap Index Scan on test_last_scan_pkey
-               Index Cond: (idx_col = 1)
-(5 rows)
+   ->  Gather Motion 1:1  (slice1; segments: 1)
+         ->  Bitmap Heap Scan on test_last_scan
+               Recheck Cond: (idx_col = 1)
+               ->  Bitmap Index Scan on test_last_scan_pkey
+                     Index Cond: (idx_col = 1)
+ Optimizer: Postgres query optimizer
+(7 rows)
 
 SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
  count 
@@ -1293,7 +1242,6 @@ SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
 
 -- put enable_seqscan back to on
 SET enable_seqscan TO on;
->>>>>>> REL_16_9
 -- ensure that stats accessors handle NULL input correctly
 SELECT pg_stat_get_replication_slot(NULL);
  pg_stat_get_replication_slot 
@@ -1301,8 +1249,6 @@ SELECT pg_stat_get_replication_slot(NULL);
  
 (1 row)
 
-<<<<<<< HEAD
-=======
 SELECT pg_stat_get_subscription_stats(NULL);
  pg_stat_get_subscription_stats 
 --------------------------------
@@ -1403,14 +1349,17 @@ SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off;
 SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off;
 -- ensure plan stays as we expect it to
 EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a);
-                QUERY PLAN                 
--------------------------------------------
- Aggregate
-   ->  Nested Loop
-         Join Filter: (t1.a = t2.a)
-         ->  Seq Scan on test_io_shared t1
-         ->  Seq Scan on test_io_shared t2
-(5 rows)
+                      QUERY PLAN                       
+-------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather Motion 3:1  (slice1; segments: 3)
+         ->  Partial Aggregate
+               ->  Nested Loop
+                     Join Filter: (t1.a = t2.a)
+                     ->  Seq Scan on test_io_shared t1
+                     ->  Seq Scan on test_io_shared t2
+ Optimizer: Postgres query optimizer
+(8 rows)
 
 SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a);
  count 
@@ -1653,11 +1602,13 @@ INSERT INTO brin_hot_2 VALUES (1, 100);
 CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2;
 UPDATE brin_hot_2 SET a = 2;
 EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100;
-            QUERY PLAN             
------------------------------------
- Seq Scan on brin_hot_2
-   Filter: ((a = 2) AND (b = 100))
-(2 rows)
+                QUERY PLAN                
+------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1)
+   ->  Seq Scan on brin_hot_2
+         Filter: ((a = 2) AND (b = 100))
+ Optimizer: Postgres query optimizer
+(4 rows)
 
 SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
  count 
@@ -1667,13 +1618,15 @@ SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
 
 SET enable_seqscan = off;
 EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100;
-                 QUERY PLAN                  
----------------------------------------------
- Bitmap Heap Scan on brin_hot_2
-   Recheck Cond: ((b = 100) AND (a = 2))
-   ->  Bitmap Index Scan on brin_hot_2_b_idx
-         Index Cond: (b = 100)
-(4 rows)
+                    QUERY PLAN                     
+---------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)
+   ->  Bitmap Heap Scan on brin_hot_2
+         Recheck Cond: ((b = 100) AND (a = 2))
+         ->  Bitmap Index Scan on brin_hot_2_b_idx
+               Index Cond: (b = 100)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
  count 
@@ -1690,13 +1643,15 @@ INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20);
 CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1);
 UPDATE brin_hot_3 SET a = 2;
 EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2;
-                 QUERY PLAN                  
----------------------------------------------
- Bitmap Heap Scan on brin_hot_3
-   Recheck Cond: (a = 2)
-   ->  Bitmap Index Scan on brin_hot_3_a_idx
-         Index Cond: (a = 2)
-(4 rows)
+                    QUERY PLAN                     
+---------------------------------------------------
+ Gather Motion 1:1  (slice1; segments: 1)
+   ->  Bitmap Heap Scan on brin_hot_3
+         Recheck Cond: (a = 2)
+         ->  Bitmap Index Scan on brin_hot_3_a_idx
+               Index Cond: (a = 2)
+ Optimizer: Postgres query optimizer
+(6 rows)
 
 SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2;
  count 
@@ -1706,5 +1661,4 @@ SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2;
 
 DROP TABLE brin_hot_3;
 SET enable_seqscan = on;
->>>>>>> REL_16_9
 -- End of Stats Test
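
Note: the last_seq_scan/last_idx_scan coverage merged above can be
spot-checked against the PostgreSQL 16 columns of pg_stat_user_tables;
a minimal sketch using the test's own table name:

SELECT seq_scan, last_seq_scan, idx_scan, last_idx_scan
  FROM pg_stat_user_tables
 WHERE relname = 'test_last_scan';
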
diff --git a/src/test/regress/expected/tid.out b/src/test/regress/expected/tid.out
index 56633984436..4dd11297d73 100644
--- a/src/test/regress/expected/tid.out
+++ b/src/test/regress/expected/tid.out
@@ -79,12 +79,7 @@ DROP SEQUENCE tid_seq;
 -- Index, fails with incorrect relation type
 CREATE INDEX tid_ind ON tid_tab(a);
 SELECT currtid2('tid_ind'::text, '(0,1)'::tid); -- fails
-<<<<<<< HEAD
 ERROR:  function currtid2 is not supported by GPDB
-=======
-ERROR:  cannot open relation "tid_ind"
-DETAIL:  This operation is not supported for indexes.
->>>>>>> REL_16_9
 DROP INDEX tid_ind;
 -- Partitioned table, no storage
 CREATE TABLE tid_part (a int) PARTITION BY RANGE (a);
diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out
index 944020de239..aa44021e4e4 100644
--- a/src/test/regress/expected/tsearch.out
+++ b/src/test/regress/expected/tsearch.out
@@ -1107,6 +1107,7 @@ SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, n
  DFG  |    1 |      2
 (1 row)
 
+DROP INDEX wowidx;
 --dictionaries and to_tsvector
 SELECT ts_lexize('english_stem', 'skies');
  ts_lexize 
diff --git a/src/test/regress/expected/vacuum_parallel.out b/src/test/regress/expected/vacuum_parallel.out
index c6ac6af69b4..2f5c4117cc3 100644
--- a/src/test/regress/expected/vacuum_parallel.out
+++ b/src/test/regress/expected/vacuum_parallel.out
@@ -24,11 +24,7 @@ WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
 ) as leader_will_handle_small_index;
  leader_will_handle_small_index 
 --------------------------------
-<<<<<<< HEAD
  f
-=======
- t
->>>>>>> REL_16_9
 (1 row)
 
 SELECT count(*) as trigger_parallel_vacuum_nindexes
@@ -44,10 +40,7 @@ WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass
 -- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
 DELETE FROM parallel_vacuum_table;
 VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
-<<<<<<< HEAD
 WARNING:  disabling parallel option of vacuum on "parallel_vacuum_table" --- cannot vacuum tables in parallel
-=======
->>>>>>> REL_16_9
 -- Since vacuum_in_leader_small_index uses deduplication, we expect an
 -- assertion failure with bug #17245 (in the absence of bugfix):
 INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 39f6488fbef..fca6c86c05e 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -45,7 +45,6 @@ test: copy copyselect copydml insert insert_conflict
 test: create_function_c create_misc create_operator create_procedure create_table create_type create_schema
 test: create_index create_index_spgist create_view index_including index_including_gist
 # Depends on things setup for create_index
-test: gp_gin_index
 
 # ----------
 # Another group of parallel tests
@@ -156,6 +155,8 @@ test: select_views portals_p2 cluster dependency guc bitmapops combocid tsearch
 # ----------
 test: json jsonb json_encoding jsonpath jsonpath_encoding jsonb_jsonpath sqljson
 
+test: gp_gin_index
+
 # ----------
 # Another group of parallel tests
 # with depends on create_misc
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 0b094a42caa..1c17f282b06 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -993,7 +993,6 @@ group by y;
 -- Ensure results are correct.
 -- start_ignore
 select * from v_pagg_test order by y;
--- end_ignore
 
 -- Ensure parallel aggregation is actually being used.
 explain (costs off) select * from v_pagg_test order by y;
@@ -1001,6 +1000,7 @@ explain (costs off) select * from v_pagg_test order by y;
 -- Ensure results are the same without parallel aggregation.
 set max_parallel_workers_per_gather = 0;
 select * from v_pagg_test order by y;
+-- end_ignore
 
 -- Check that we don't fail on anonymous record types.
 set max_parallel_workers_per_gather = 2;
diff --git a/src/test/regress/sql/create_function_sql.sql b/src/test/regress/sql/create_function_sql.sql
index a93180d5f32..89e9af3a499 100644
--- a/src/test/regress/sql/create_function_sql.sql
+++ b/src/test/regress/sql/create_function_sql.sql
@@ -180,11 +180,7 @@ CREATE FUNCTION functest_S_13() RETURNS boolean
         SELECT false;
     END;
 
-<<<<<<< HEAD:src/test/regress/sql/create_function_3.sql
--- check display of function argments in sub-SELECT
-=======
 -- check display of function arguments in sub-SELECT
->>>>>>> REL_16_9:src/test/regress/sql/create_function_sql.sql
 CREATE TABLE functest1 (i int);
 CREATE FUNCTION functest_S_16(a int, b int) RETURNS void
     LANGUAGE SQL
diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql
index 18c92048188..985cc7fd095 100644
--- a/src/test/regress/sql/memoize.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -1,12 +1,9 @@
 -- Perform tests on the Memoize node.
 
-<<<<<<< HEAD
 -- GPDB_14_MERGE_FIXME:
 -- 1.test memoize in CBDB as enable_nestloop is false by default
 -- 2.enable memoize in orca
 
-=======
->>>>>>> REL_16_9
 -- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
@@ -30,10 +27,7 @@ begin
         ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
         ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
         ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
-<<<<<<< HEAD
         ln := regexp_replace(ln, 'Memory: \d+', 'Memory: N');
-=======
->>>>>>> REL_16_9
        ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
         return next ln;
@@ -42,11 +36,8 @@ end;
 $$;
 
 -- Ensure we get a memoize node on the inner side of the nested loop
-<<<<<<< HEAD
 SET optimizer_enable_hashjoin TO off;
 SET optimizer_enable_bitmapscan TO off;
-=======
->>>>>>> REL_16_9
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
 
@@ -63,24 +54,12 @@ WHERE t2.unique1 < 1000;
 -- Try with LATERAL joins
 SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-<<<<<<< HEAD
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-=======
 LATERAL (SELECT t2.unique1 FROM tenk1 t2
          WHERE t1.twenty = t2.unique1 OFFSET 0) t2
->>>>>>> REL_16_9
 WHERE t1.unique1 < 1000;', false);
 
 -- And check we get the expected results.
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-<<<<<<< HEAD
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;
-
--- Reduce work_mem so that we see some cache evictions
-SET work_mem TO '64kB';
-SET enable_mergejoin TO off;
-=======
 LATERAL (SELECT t2.unique1 FROM tenk1 t2
          WHERE t1.twenty = t2.unique1 OFFSET 0) t2
 WHERE t1.unique1 < 1000;
@@ -110,7 +89,6 @@ DROP TABLE expr_key;
 -- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
 SET work_mem TO '64kB';
 SET hash_mem_multiplier TO 1.0;
->>>>>>> REL_16_9
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
@@ -141,11 +119,7 @@ DROP TABLE flt;
 CREATE TABLE strtest (n name, t text);
 CREATE INDEX strtest_n_idx ON strtest (n);
 CREATE INDEX strtest_t_idx ON strtest (t);
-<<<<<<< HEAD
-INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(md5('three'),100));
-=======
 INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(fipshash('three'),100));
->>>>>>> REL_16_9
 -- duplicate rows so we get some cache hits
 INSERT INTO strtest SELECT * FROM strtest;
 ANALYZE strtest;
@@ -160,8 +134,6 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false);
 
 DROP TABLE strtest;
 
-<<<<<<< HEAD
-=======
 -- Ensure memoize works with partitionwise join
 SET enable_partitionwise_join TO on;
 
@@ -189,7 +161,6 @@ DROP TABLE prt;
 
 RESET enable_partitionwise_join;
 
->>>>>>> REL_16_9
 -- Exercise Memoize code that flushes the cache when a parameter changes which
 -- is not part of the cache key.
 
@@ -213,16 +184,11 @@ WHERE unique1 < 3
 RESET enable_seqscan;
 RESET enable_mergejoin;
 RESET work_mem;
-<<<<<<< HEAD
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
 RESET optimizer_enable_hashjoin;
 RESET optimizer_enable_bitmapscan;
-=======
 RESET hash_mem_multiplier;
-RESET enable_bitmapscan;
-RESET enable_hashjoin;
->>>>>>> REL_16_9
 
 -- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
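
Note: the regexp_replace lines kept above belong to memoize.sql's
explain_memoize() helper, which strips machine-dependent counters out of
EXPLAIN ANALYZE output before comparison. A self-contained sketch of the
idiom; the function name and option list here are illustrative, not the
test's exact source:

create or replace function explain_normalized(query text) returns setof text
language plpgsql as $$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
                       query)
    loop
        -- these counters vary between machines, so normalize them
        ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
        ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
        ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
        return next ln;
    end loop;
end;
$$;
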
diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql
index 65fc979858a..c4afe1d10f3 100644
--- a/src/test/regress/sql/stats.sql
+++ b/src/test/regress/sql/stats.sql
@@ -30,69 +30,7 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
   FROM pg_catalog.pg_stat_user_tables AS t,
        pg_catalog.pg_statio_user_tables AS b
  WHERE t.relname='tenk2' AND b.relname='tenk2';
-<<<<<<< HEAD
-
--- function to wait for counters to advance
-create function wait_for_stats() returns void as $$
-declare
-  start_time timestamptz := clock_timestamp();
-  updated1 bool;
-  updated2 bool;
-  updated3 bool;
-  updated4 bool;
-  updated5 bool;
-begin
-  -- we don't want to wait forever; loop will exit after 30 seconds
-  for i in 1 .. 300 loop
-
-    -- With parallel query, the seqscan and indexscan on tenk2 might be done
-    -- in parallel worker processes, which will send their stats counters
-    -- asynchronously to what our own session does.  So we must check for
-    -- those counts to be registered separately from the update counts.
-
-    -- check to see if seqscan has been sensed
-    SELECT (st.seq_scan >= pr.seq_scan + 1) INTO updated1
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    -- check to see if indexscan has been sensed
-    SELECT (st.idx_scan >= pr.idx_scan + 1) INTO updated2
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    -- check to see if all updates have been sensed
-    SELECT (n_tup_ins > 0) INTO updated3
-      FROM pg_stat_user_tables WHERE relname='trunc_stats_test4';
-
-    -- We must also check explicitly that pg_stat_get_snapshot_timestamp has
-    -- advanced, because that comes from the global stats file which might
-    -- be older than the per-DB stats file we got the other values from.
-    SELECT (pr.snap_ts < pg_stat_get_snapshot_timestamp()) INTO updated4
-      FROM prevstats AS pr;
-
-    -- check to see if idx_tup_fetch has been sensed
-    SELECT (st.idx_tup_fetch >= pr.idx_tup_fetch + 1) INTO updated5
-      FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
-     WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
-    exit when updated1 and updated2 and updated3 and updated4 and updated5;
-
-    -- wait a little
-    perform pg_sleep_for('100 milliseconds');
-
-    -- reset stats snapshot so we can test again
-    perform pg_stat_clear_snapshot();
-
-  end loop;
-
-  -- report time waited in postmaster log (where it won't change test output)
-  raise log 'wait_for_stats delayed % seconds',
-    extract(epoch from clock_timestamp() - start_time);
-end
-$$ language plpgsql;
-=======
 COMMIT;
->>>>>>> REL_16_9
 
 -- test effects of TRUNCATE on n_live_tup/n_dead_tup counters
 CREATE TABLE trunc_stats_test(id serial);
@@ -359,12 +297,6 @@ DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_t
 DROP TABLE prevstats;
 
 
-<<<<<<< HEAD
--- ensure that stats accessors handle NULL input correctly
-SELECT pg_stat_get_replication_slot(NULL);
-
-
-=======
 -----
 -- Test that last_seq_scan, last_idx_scan are correctly maintained
 --
@@ -915,5 +847,4 @@ DROP TABLE brin_hot_3;
 
 SET enable_seqscan = on;
 
->>>>>>> REL_16_9
 -- End of Stats Test
diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql
index fa7efac2ad7..226a78c996f 100644
--- a/src/test/regress/sql/tsearch.sql
+++ b/src/test/regress/sql/tsearch.sql
@@ -270,6 +270,8 @@ INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH');
 SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10;
 SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, nentry DESC, word;
 
+DROP INDEX wowidx;
+
 --dictionaries and to_tsvector
 
 SELECT ts_lexize('english_stem', 'skies');

