This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new 7f9b492a61a Fix linking errors
7f9b492a61a is described below

commit 7f9b492a61acc7cc08de0236ff1fdce3829f25f0
Author: Jinbao Chen <[email protected]>
AuthorDate: Wed Oct 8 16:44:51 2025 +0800

    Fix linking errors
---
 gpcontrib/gp_replica_check/gp_replica_check.c |   2 +-
 src/backend/access/transam/xact.c             |   7 -
 src/backend/access/transam/xlog.c             |   5 -
 src/backend/commands/explain.c                |   2 +-
 src/backend/commands/storagecmds.c            |   4 +-
 src/backend/commands/vacuum.c                 | 200 ++++++++++++++++++++++++++
 src/backend/commands/vacuum_ao.c              |   1 +
 src/backend/executor/nodePartitionSelector.c  |   6 +-
 src/backend/optimizer/plan/planner.c          |   2 +
 src/backend/parser/analyze.c                  |   6 +-
 src/backend/tcop/postgres.c                   |   3 +
 src/backend/utils/adt/dbsize.c                |   4 +-
 src/backend/utils/cache/relcache.c            |   2 +-
 src/backend/utils/misc/Makefile               |   1 +
 src/backend/utils/sort/tuplestore.c           |   8 +-
 src/include/catalog/catalog.h                 |   2 -
 src/include/catalog/pg_proc.dat               |   3 -
 src/include/commands/vacuum.h                 |  19 ++-
 src/include/nodes/queryjumble.h               |   2 +-
 src/include/storage/buffile.h                 |   5 -
 src/include/storage/bufmgr.h                  |   1 -
 src/include/{nodes => utils}/queryjumble.h    |  14 +-
 src/include/utils/relmapper.h                 |   4 -
 23 files changed, 243 insertions(+), 60 deletions(-)

diff --git a/gpcontrib/gp_replica_check/gp_replica_check.c b/gpcontrib/gp_replica_check/gp_replica_check.c
index f34d9a7f906..68eaa3cefd2 100644
--- a/gpcontrib/gp_replica_check/gp_replica_check.c
+++ b/gpcontrib/gp_replica_check/gp_replica_check.c
@@ -543,7 +543,7 @@ get_relfilenode_map()
                Oid                     rnode;
                /* Its relmapped relation, need to fetch the mapping from relmap file */
                if (classtuple->relfilenode == InvalidOid)
-                       rnode = RelationMapOidToFilenode(classtuple->oid,
+                       rnode = RelationMapOidToFilenumber(classtuple->oid,
                                                                                        classtuple->relisshared);
                else
                        rnode = classtuple->relfilenode;
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 2086edea458..cb12173b1ce 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1675,13 +1675,6 @@ RecordTransactionCommit(void)
                replorigin = (replorigin_session_origin != InvalidRepOriginId &&
                                          replorigin_session_origin != DoNotReplicateId);
 
-               /*
-                * Begin commit critical section and insert the commit XLOG record.
-                */
-               /* Tell bufmgr and smgr to prepare for commit */
-               if (markXidCommitted)
-                       BufmgrCommit();
-
                if (isDtxPrepared)
                        SIMPLE_FAULT_INJECTOR("before_xlog_xact_distributed_commit");
 
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9cc43869e82..4721cf2f2d8 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -5199,11 +5199,6 @@ UpdateCatalogForStandbyPromotion(void)
        if (MyBackendId > MaxBackends || MyBackendId <= 0)
                        elog(FATAL, "bad backend id: %d", MyBackendId);
 
-       /*
-        * bufmgr needs another initialization call too
-        */
-       InitBufferPoolBackend();
-
        /* Start transaction locally */
        old_role = Gp_role;
        Gp_role = GP_ROLE_UTILITY;
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 33259c839af..be21ca00012 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -306,7 +306,7 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt,
 
        query = castNode(Query, stmt->query);
        if (IsQueryIdEnabled())
-               jstate = JumbleQuery(query, pstate->p_sourcetext);
+               jstate = JumbleQuery(query);
 
        if (post_parse_analyze_hook)
                (*post_parse_analyze_hook) (pstate, query, jstate);
diff --git a/src/backend/commands/storagecmds.c b/src/backend/commands/storagecmds.c
index b3881d0f3a4..344379effdd 100644
--- a/src/backend/commands/storagecmds.c
+++ b/src/backend/commands/storagecmds.c
@@ -324,7 +324,7 @@ storage_user_mapping_ddl_aclcheck(Oid umuserid, Oid serverid, const char *server
                {
                        AclResult       aclresult;
 
-                       aclresult = gp_storage_server_aclcheck(serverid, curuserid, ACL_USAGE);
+                       aclresult = object_aclcheck(StorageServerRelationId, serverid, curuserid, ACL_USAGE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_STORAGE_SERVER, servername);
                }
@@ -569,7 +569,7 @@ RemoveStorageServer(DropStorageServerStmt *stmt)
                {
                        AclResult       aclresult;
 
-                       aclresult = gp_storage_server_aclcheck(serverId, curuserid, ACL_USAGE);
+                       aclresult = object_aclcheck(StorageServerRelationId, serverId, curuserid, ACL_USAGE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_STORAGE_SERVER, stmt->servername);
                }
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 59fb8c4e3b8..42d8aff5182 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -3881,3 +3881,203 @@ vac_cmp_itemptr(const void *left, const void *right)
 
        return 0;
 }
+
+
+void
+vacuum_set_xid_limits(Relation rel,
+                                         int freeze_min_age,
+                                         int freeze_table_age,
+                                         int multixact_freeze_min_age,
+                                         int multixact_freeze_table_age,
+                                         TransactionId *oldestXmin,
+                                         TransactionId *freezeLimit,
+                                         TransactionId *xidFullScanLimit,
+                                         MultiXactId *multiXactCutoff,
+                                         MultiXactId *mxactFullScanLimit)
+{
+       int                     freezemin;
+       int                     mxid_freezemin;
+       int                     effective_multixact_freeze_max_age;
+       TransactionId limit;
+       TransactionId safeLimit;
+       MultiXactId oldestMxact;
+       MultiXactId mxactLimit;
+       MultiXactId safeMxactLimit;
+
+       /*
+        * We can always ignore processes running lazy vacuum.  This is because we
+        * use these values only for deciding which tuples we must keep in the
+        * tables.  Since lazy vacuum doesn't write its XID anywhere (usually no
+        * XID assigned), it's safe to ignore it.  In theory it could be
+        * problematic to ignore lazy vacuums in a full vacuum, but keep in mind
+        * that only one vacuum process can be working on a particular table at
+        * any time, and that each vacuum is always an independent transaction.
+        */
+       *oldestXmin = GetOldestNonRemovableTransactionId(rel);
+
+       if (OldSnapshotThresholdActive())
+       {
+               TransactionId limit_xmin;
+               TimestampTz limit_ts;
+
+               if (TransactionIdLimitedForOldSnapshots(*oldestXmin, rel,
+                                                                                               &limit_xmin, &limit_ts))
+               {
+                       /*
+                        * TODO: We should only set the threshold if we are pruning on the
+                        * basis of the increased limits.  Not as crucial here as it is
+                        * for opportunistic pruning (which often happens at a much higher
+                        * frequency), but would still be a significant improvement.
+                        */
+                       SetOldSnapshotThresholdTimestamp(limit_ts, limit_xmin);
+                       *oldestXmin = limit_xmin;
+               }
+       }
+
+       Assert(TransactionIdIsNormal(*oldestXmin));
+
+       /*
+        * Determine the minimum freeze age to use: as specified by the caller, or
+        * vacuum_freeze_min_age, but in any case not more than half
+        * autovacuum_freeze_max_age, so that autovacuums to prevent XID
+        * wraparound won't occur too frequently.
+        */
+       freezemin = freeze_min_age;
+       if (freezemin < 0)
+               freezemin = vacuum_freeze_min_age;
+       freezemin = Min(freezemin, autovacuum_freeze_max_age / 2);
+       Assert(freezemin >= 0);
+
+       /*
+        * Compute the cutoff XID, being careful not to generate a "permanent" XID
+        */
+       limit = *oldestXmin - freezemin;
+       if (!TransactionIdIsNormal(limit))
+               limit = FirstNormalTransactionId;
+
+       /*
+        * If oldestXmin is very far back (in practice, more than
+        * autovacuum_freeze_max_age / 2 XIDs old), complain and force a minimum
+        * freeze age of zero.
+        */
+       safeLimit = ReadNextTransactionId() - autovacuum_freeze_max_age;
+       if (!TransactionIdIsNormal(safeLimit))
+               safeLimit = FirstNormalTransactionId;
+
+       if (TransactionIdPrecedes(limit, safeLimit))
+       {
+               ereport(WARNING,
+                               (errmsg("oldest xmin is far in the past"),
+                                               errhint("Close open transactions soon to avoid wraparound problems.\n"
+                                                               "You might also need to commit or roll back old prepared transactions, or drop stale replication slots.")));
+               limit = *oldestXmin;
+       }
+
+       *freezeLimit = limit;
+
+       /*
+        * Compute the multixact age for which freezing is urgent.  This is
+        * normally autovacuum_multixact_freeze_max_age, but may be less if we are
+        * short of multixact member space.
+        */
+       effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
+
+       /*
+        * Determine the minimum multixact freeze age to use: as specified by
+        * caller, or vacuum_multixact_freeze_min_age, but in any case not more
+        * than half effective_multixact_freeze_max_age, so that autovacuums to
+        * prevent MultiXact wraparound won't occur too frequently.
+        */
+       mxid_freezemin = multixact_freeze_min_age;
+       if (mxid_freezemin < 0)
+               mxid_freezemin = vacuum_multixact_freeze_min_age;
+       mxid_freezemin = Min(mxid_freezemin,
+                                                effective_multixact_freeze_max_age / 2);
+       Assert(mxid_freezemin >= 0);
+
+       /* compute the cutoff multi, being careful to generate a valid value */
+       oldestMxact = GetOldestMultiXactId();
+       mxactLimit = oldestMxact - mxid_freezemin;
+       if (mxactLimit < FirstMultiXactId)
+               mxactLimit = FirstMultiXactId;
+
+       safeMxactLimit =
+                       ReadNextMultiXactId() - effective_multixact_freeze_max_age;
+       if (safeMxactLimit < FirstMultiXactId)
+               safeMxactLimit = FirstMultiXactId;
+
+       if (MultiXactIdPrecedes(mxactLimit, safeMxactLimit))
+       {
+               ereport(WARNING,
+                               (errmsg("oldest multixact is far in the past"),
+                                               errhint("Close open transactions with multixacts soon to avoid wraparound problems.")));
+               /* Use the safe limit, unless an older mxact is still running */
+               if (MultiXactIdPrecedes(oldestMxact, safeMxactLimit))
+                       mxactLimit = oldestMxact;
+               else
+                       mxactLimit = safeMxactLimit;
+       }
+
+       *multiXactCutoff = mxactLimit;
+
+       if (xidFullScanLimit != NULL)
+       {
+               int                     freezetable;
+
+               Assert(mxactFullScanLimit != NULL);
+
+               /*
+                * Determine the table freeze age to use: as specified by the caller,
+                * or vacuum_freeze_table_age, but in any case not more than
+                * autovacuum_freeze_max_age * 0.95, so that if you have e.g. nightly
+                * VACUUM schedule, the nightly VACUUM gets a chance to freeze tuples
+                * before anti-wraparound autovacuum is launched.
+                */
+               freezetable = freeze_table_age;
+               if (freezetable < 0)
+                       freezetable = vacuum_freeze_table_age;
+               freezetable = Min(freezetable, autovacuum_freeze_max_age * 0.95);
+               Assert(freezetable >= 0);
+
+               /*
+                * Compute XID limit causing a full-table vacuum, being careful not to
+                * generate a "permanent" XID.
+                */
+               limit = ReadNextTransactionId() - freezetable;
+               if (!TransactionIdIsNormal(limit))
+                       limit = FirstNormalTransactionId;
+
+               *xidFullScanLimit = limit;
+
+               /*
+                * Similar to the above, determine the table freeze age to use for
+                * multixacts: as specified by the caller, or
+                * vacuum_multixact_freeze_table_age, but in any case not more than
+                * autovacuum_multixact_freeze_table_age * 0.95, so that if you have
+                * e.g. nightly VACUUM schedule, the nightly VACUUM gets a chance to
+                * freeze multixacts before anti-wraparound autovacuum is launched.
+                */
+               freezetable = multixact_freeze_table_age;
+               if (freezetable < 0)
+                       freezetable = vacuum_multixact_freeze_table_age;
+               freezetable = Min(freezetable,
+                                                 effective_multixact_freeze_max_age * 0.95);
+               Assert(freezetable >= 0);
+
+               /*
+                * Compute MultiXact limit causing a full-table vacuum, being careful
+                * to generate a valid MultiXact value.
+                */
+               mxactLimit = ReadNextMultiXactId() - freezetable;
+               if (mxactLimit < FirstMultiXactId)
+                       mxactLimit = FirstMultiXactId;
+
+               *mxactFullScanLimit = mxactLimit;
+       }
+       else
+       {
+               Assert(mxactFullScanLimit == NULL);
+       }
+}
+
+
diff --git a/src/backend/commands/vacuum_ao.c b/src/backend/commands/vacuum_ao.c
index 4895cc50e33..9d7f8f779e3 100644
--- a/src/backend/commands/vacuum_ao.c
+++ b/src/backend/commands/vacuum_ao.c
@@ -288,6 +288,7 @@ ao_vacuum_rel_post_cleanup(Relation onerel, VacuumParams *params, BufferAccessSt
                                                                 &relhasindex,
                                                                 &total_file_segs);
 
+       /* MERGE16_FIXME: How to set limits for ao */
        vacuum_set_xid_limits(onerel,
                                                  params->freeze_min_age,
                                                  params->freeze_table_age,
diff --git a/src/backend/executor/nodePartitionSelector.c b/src/backend/executor/nodePartitionSelector.c
index 76c84bca94d..7e4fceb5f9e 100644
--- a/src/backend/executor/nodePartitionSelector.c
+++ b/src/backend/executor/nodePartitionSelector.c
@@ -78,6 +78,7 @@ PartitionSelectorState *
 ExecInitPartitionSelector(PartitionSelector *node, EState *estate, int eflags)
 {
        PartitionSelectorState *psstate;
+       Bitmapset  *validsubplans;
 
        /* check for unsupported flags */
        Assert (!(eflags & (EXEC_FLAG_MARK | EXEC_FLAG_BACKWARD)));
@@ -101,8 +102,9 @@ ExecInitPartitionSelector(PartitionSelector *node, EState *estate, int eflags)
        outerPlanState(psstate) = ExecInitNode(outerPlan(node), estate, eflags);
 
        /* Create the working data structure for pruning. */
-       psstate->prune_state = ExecCreatePartitionPruneState(&psstate->ps,
-                                                                                                                node->part_prune_info);
+       /* MERGE16_FIXME: This use of ExecInitPartitionPruning may be incorrect */
+       psstate->prune_state = ExecInitPartitionPruning(&psstate->ps, node->paramid,
+                                                                                                       node->part_prune_info, &validsubplans);
 
        return psstate;
 }
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 4e767aaea03..356d994d5f8 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8819,6 +8819,7 @@ create_partial_grouping_paths(PlannerInfo *root,
                                                                                 NIL,
                                                                                 agg_partial_costs,
                                                                                 dNumPartialGroups), root);
+#if 0
                        else
                                add_path(partially_grouped_rel, (Path *)
                                                 create_group_path(root,
@@ -8827,6 +8828,7 @@ create_partial_grouping_paths(PlannerInfo *root,
                                                                                   root->processed_groupClause,
                                                                                   NIL,
                                                                                   dNumPartialGroups), root);
+#endif
                }
        }
 
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index b75bf05c21b..bf4a6de1b15 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -173,7 +173,7 @@ parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
        query = transformTopLevelStmt(pstate, parseTree);
 
        if (IsQueryIdEnabled())
-               jstate = JumbleQuery(query, sourceText);
+               jstate = JumbleQuery(query);
 
        if (post_parse_analyze_hook)
                (*post_parse_analyze_hook) (pstate, query, jstate);
@@ -215,7 +215,7 @@ parse_analyze_varparams(RawStmt *parseTree, const char *sourceText,
        check_variable_parameters(pstate, query);
 
        if (IsQueryIdEnabled())
-               jstate = JumbleQuery(query, sourceText);
+               jstate = JumbleQuery(query);
 
        if (post_parse_analyze_hook)
                (*post_parse_analyze_hook) (pstate, query, jstate);
@@ -252,7 +252,7 @@ parse_analyze_withcb(RawStmt *parseTree, const char *sourceText,
        query = transformTopLevelStmt(pstate, parseTree);
 
        if (IsQueryIdEnabled())
-               jstate = JumbleQuery(query, sourceText);
+               jstate = JumbleQuery(query);
 
        if (post_parse_analyze_hook)
                (*post_parse_analyze_hook) (pstate, query, jstate);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 2efcee1edb3..394a6a5420e 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -131,6 +131,9 @@ int                 PostAuthDelay = 0;
 /* Time between checks that the client is still connected. */
 int                    client_connection_check_interval = 0;
 
+/* flags for non-system relation kinds to restrict use */
+int                    restrict_nonsystem_relation_kind;
+
 /*
  * Hook for extensions, to get notified when query cancel or DIE signal is
  * received. This allows the extension to stop whatever it's doing as
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 851a61aaed7..924e42e36f7 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -1219,7 +1219,7 @@ pg_relation_filenode(PG_FUNCTION_ARGS)
                        if (relform->relfilenode)
                                result = relform->relfilenode;
                        else                            /* Consult the relation mapper */
-                               result = RelationMapOidToFilenode(relid,
+                               result = RelationMapOidToFilenumber(relid,
                                                                                                  relform->relisshared);
                        break;
 
@@ -1314,7 +1314,7 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
                        if (relform->relfilenode)
                                rlocator.relNumber = relform->relfilenode;
                        else                            /* Consult the relation mapper */
-                               rlocator.relNumber = RelationMapOidToFilenode(relid,
+                               rlocator.relNumber = RelationMapOidToFilenumber(relid,
                                                                                                                 relform->relisshared);
                        break;
 
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 3a0d585b7df..313e6c1dc2e 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -3822,7 +3822,7 @@ RelationBuildLocalRelation(const char *relname,
                        relfilenumber = relid;
                else
                {
-                       relfilenumber = GetNewRelFileNode(reltablespace, NULL, relpersistence);
+                       relfilenumber = GetNewRelFileNumber(reltablespace, NULL, relpersistence);
                        if (Gp_role == GP_ROLE_EXECUTE || IsBinaryUpgrade)
                                AdvanceObjectId(relid);
                }
diff --git a/src/backend/utils/misc/Makefile b/src/backend/utils/misc/Makefile
index 38f4204761e..38a72665aec 100644
--- a/src/backend/utils/misc/Makefile
+++ b/src/backend/utils/misc/Makefile
@@ -29,6 +29,7 @@ OBJS = \
        pg_rusage.o \
        ps_status.o \
        queryenvironment.o \
+       queryjumble.o \
        rls.o \
        sampling.o \
        superuser.o \
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 48d76388d06..bb91a720e33 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -1716,7 +1716,7 @@ tuplestore_make_shared(Tuplestorestate *state, SharedFileSet *fileset, const cha
        oldowner = CurrentResourceOwner;
        CurrentResourceOwner = state->resowner;
 
-       state->myfile = BufFileCreateShared(fileset, filename, state->work_set);
+       state->myfile = BufFileCreateFileSet(&fileset->fs, filename, state->work_set);
        CurrentResourceOwner = oldowner;
 
        /*
@@ -1747,7 +1747,7 @@ tuplestore_freeze(Tuplestorestate *state)
        Assert(state->share_status == TSHARE_WRITER);
        Assert(!state->frozen);
        dumptuples(state);
-       BufFileExportShared(state->myfile);
+       BufFileExportFileSet(state->myfile);
        state->frozen = true;
 }
 
@@ -1775,7 +1775,7 @@ tuplestore_open_shared(SharedFileSet *fileset, const char *filename)
        state->writetup = writetup_forbidden;
        state->readtup = readtup_heap;
 
-       state->myfile = BufFileOpenShared(fileset, filename, O_RDONLY);
+       state->myfile = BufFileOpenFileSet(&fileset->fs, filename, O_RDONLY, false);
        state->readptrs[0].file = 0;
        state->readptrs[0].offset = 0L;
        state->status = TSS_READFILE;
@@ -1885,7 +1885,7 @@ tuplestore_make_sharedV2(Tuplestorestate *state, SharedFileSet *fileset,
        oldowner = CurrentResourceOwner;
        CurrentResourceOwner = owner;
 
-       state->myfile = BufFileCreateShared(fileset, filename, state->work_set);
+       state->myfile = BufFileCreateFileSet(&fileset->fs, filename, state->work_set);
        CurrentResourceOwner = oldowner;
 
        /*
diff --git a/src/include/catalog/catalog.h b/src/include/catalog/catalog.h
index 4824bf80456..903c1dcfe70 100644
--- a/src/include/catalog/catalog.h
+++ b/src/include/catalog/catalog.h
@@ -53,8 +53,6 @@ extern bool IsSharedRelation(Oid relationId);
 
 extern Oid GetNewOidWithIndex(Relation relation, Oid indexId,
                                                          AttrNumber oidcolumn);
-extern Oid GetNewRelFileNode(Oid reltablespace, Relation pg_class,
-                                                        char relpersistence);
 
 extern void reldir_and_filename(RelFileLocator rnode, BackendId backend, ForkNumber forknum,
                                        char **dir, char **filename);
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 835c4848fc2..58518eb5a3d 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -12200,9 +12200,6 @@
 { oid => 6039, descr => 'Statistics: Cloudberry session id of backend',
    proname => 'pg_stat_get_backend_session_id', provolatile => 's', proparallel => 'r', prorettype => 'int4', proargtypes => 'int4', prosrc => 'pg_stat_get_backend_session_id' },
 
-{ oid => 6042, descr => 'change priority of all the backends for a given session id',
-   proname => 'pg_renice_session', provolatile => 'v', proparallel => 'r', prorettype => 'int4', proargtypes => 'int4 int4', prosrc => 'pg_renice_session' },
-
 { oid => 7098, descr => 'get replication error',
    proname => 'gp_replication_error', provolatile => 'v', proparallel => 'r', prorettype => 'text', proargtypes => '', prosrc => 'gp_replication_error' },
 
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 38bf39ba069..9d989890d21 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -464,15 +464,6 @@ extern void vac_update_relstats(Relation relation,
                                                                bool *minmulti_updated,
                                                                bool in_outer_xact,
                                                                bool isvacuum);
-extern void vacuum_set_xid_limits(Relation rel,
-                                                                 int freeze_min_age, int freeze_table_age,
-                                                                 int multixact_freeze_min_age,
-                                                                 int multixact_freeze_table_age,
-                                                                 TransactionId *oldestXmin,
-                                                                 TransactionId *freezeLimit,
-                                                                 TransactionId *xidFullScanLimit,
-                                                                 MultiXactId *multiXactCutoff,
-                                                                 MultiXactId *mxactFullScanLimit);
 extern bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
                                                           struct VacuumCutoffs *cutoffs);
 extern bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs);
@@ -493,7 +484,15 @@ extern Size vac_max_items_to_alloc_size(int max_items);
 /* In postmaster/autovacuum.c */
 extern void AutoVacuumUpdateCostLimit(void);
 extern void VacuumUpdateCosts(void);
-
+extern void vacuum_set_xid_limits(Relation rel,
+                                                                 int freeze_min_age, int freeze_table_age,
+                                                                 int multixact_freeze_min_age,
+                                                                 int multixact_freeze_table_age,
+                                                                 TransactionId *oldestXmin,
+                                                                 TransactionId *freezeLimit,
+                                                                 TransactionId *xidFullScanLimit,
+                                                                 MultiXactId *multiXactCutoff,
+                                                                 MultiXactId *mxactFullScanLimit);
 /* in commands/vacuumparallel.c */
 extern ParallelVacuumState *parallel_vacuum_init(Relation rel, Relation *indrels,
                                                                                                 int nindexes, int nrequested_workers,
diff --git a/src/include/nodes/queryjumble.h b/src/include/nodes/queryjumble.h
index f49f47a6a7d..a63eb809ada 100644
--- a/src/include/nodes/queryjumble.h
+++ b/src/include/nodes/queryjumble.h
@@ -64,7 +64,7 @@ extern PGDLLIMPORT int compute_query_id;
 
 
 extern const char *CleanQuerytext(const char *query, int *location, int *len);
-extern JumbleState *JumbleQuery(Query *query, const char *querytext);
+extern JumbleState *JumbleQuery(Query *query);
 extern JumbleState *JumbleQueryDirect(Query *query, const char *querytext);
 extern void EnableQueryId(void);
 
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 4c49e50814c..870c8451953 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -55,11 +55,6 @@ extern int   BufFileSeekBlock(BufFile *file, int64 blknum);
 extern int64 BufFileSize(BufFile *file);
 extern long BufFileAppend(BufFile *target, BufFile *source);
 
-extern BufFile *BufFileCreateShared(SharedFileSet *fileset, const char *name, struct workfile_set *work_set);
-extern void BufFileExportShared(BufFile *file);
-extern BufFile *BufFileOpenShared(SharedFileSet *fileset, const char *name,
-                                                                 int mode);
-
 extern BufFile *BufFileCreateFileSet(FileSet *fileset, const char *name, workfile_set *work_set);
 extern void BufFileExportFileSet(BufFile *file);
 extern BufFile *BufFileOpenFileSet(FileSet *fileset, const char *name,
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index f50907e479d..1e01e0902e5 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -422,7 +422,6 @@ extern bool ConditionalLockBufferForCleanup(Buffer buffer);
 extern bool IsBufferCleanupOK(Buffer buffer);
 extern bool HoldingBufferPinThatDelaysRecovery(void);
 
-extern void BufmgrCommit(void);
 extern bool BgBufferSync(struct WritebackContext *wb_context);
 
 extern void AtProcExit_LocalBuffers(void);
diff --git a/src/include/nodes/queryjumble.h b/src/include/utils/queryjumble.h
similarity index 88%
copy from src/include/nodes/queryjumble.h
copy to src/include/utils/queryjumble.h
index f49f47a6a7d..c472bddc64a 100644
--- a/src/include/nodes/queryjumble.h
+++ b/src/include/utils/queryjumble.h
@@ -3,19 +3,21 @@
  * queryjumble.h
  *       Query normalization and fingerprinting.
  *
- * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *       src/include/nodes/queryjumble.h
+ *       src/include/utils/queryjumble.h
  *
  *-------------------------------------------------------------------------
  */
-#ifndef QUERYJUMBLE_H
-#define QUERYJUMBLE_H
+#ifndef QUERYJUBLE_H
+#define QUERYJUBLE_H
 
 #include "nodes/parsenodes.h"
 
+#define JUMBLE_SIZE                            1024    /* query serialization buffer size */
+
 /*
  * Struct for tracking locations/lengths of constants during normalization
  */
@@ -60,7 +62,7 @@ enum ComputeQueryIdType
 };
 
 /* GUC parameters */
-extern PGDLLIMPORT int compute_query_id;
+extern int     compute_query_id;
 
 
 extern const char *CleanQuerytext(const char *query, int *location, int *len);
@@ -68,7 +70,7 @@ extern JumbleState *JumbleQuery(Query *query, const char *querytext);
 extern JumbleState *JumbleQueryDirect(Query *query, const char *querytext);
 extern void EnableQueryId(void);
 
-extern PGDLLIMPORT bool query_id_enabled;
+extern bool query_id_enabled;
 
 /*
  * Returns whether query identifier computation has been enabled, either
diff --git a/src/include/utils/relmapper.h b/src/include/utils/relmapper.h
index 3b694814f5a..5c173bdbb3c 100644
--- a/src/include/utils/relmapper.h
+++ b/src/include/utils/relmapper.h
@@ -35,10 +35,6 @@ typedef struct xl_relmap_update
 #define MinSizeOfRelmapUpdate offsetof(xl_relmap_update, data)
 
 
-extern Oid  RelationMapOidToFilenode(Oid relationId, bool shared);
-
-extern Oid     RelationMapFilenodeToOid(Oid filenode, bool shared);
-
 extern RelFileNumber RelationMapOidToFilenumber(Oid relationId, bool shared);
 
 extern Oid     RelationMapFilenumberToOid(RelFileNumber filenumber, bool shared);


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
