This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new 6f4033b4f25 Fix compile errors for utils/cache
6f4033b4f25 is described below
commit 6f4033b4f2554a23859480d5fd339866dc050b7b
Author: Jinbao Chen <[email protected]>
AuthorDate: Mon Oct 6 17:10:51 2025 +0800
Fix compile errors for utils/cache
---
src/backend/utils/cache/catcache.c | 35 ++++++++++++++-------------
src/backend/utils/cache/relcache.c | 49 +++++++++-----------------------------
src/backend/utils/cache/typcache.c | 22 +++++++++--------
3 files changed, 41 insertions(+), 65 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index b31f0d51730..36e6ea76022 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1519,6 +1519,24 @@ SearchCatCacheMiss(CatCache *cache,
     do
     {
+        /*
+         * Ok, need to make a lookup in the relation, copy the scankey and
+         * fill out any per-call fields.  (We must re-do this when retrying,
+         * because systable_beginscan scribbles on the scankey.)
+         */
+        memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
+        cur_skey[0].sk_argument = v1;
+        cur_skey[1].sk_argument = v2;
+        cur_skey[2].sk_argument = v3;
+        cur_skey[3].sk_argument = v4;
+
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);
+
         /*
          * Good place to sanity check the tuple, before adding it to cache.
          * So if its fetched using index, lets cross verify tuple intended is the tuple
@@ -1530,23 +1548,6 @@ SearchCatCacheMiss(CatCache *cache,
                 CrossCheckTuple(cache->id, v1, v2, v3, v4, ntp);
             }
-            ct = CatalogCacheCreateEntry(cache, ntp, arguments,
-                                         hashValue, hashIndex,
-                                         false);
-            /* immediately set the refcount to 1 */
-            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-            ct->refcount++;
-            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-            break;              /* assume only one match */
-        }
-
-        scandesc = systable_beginscan(relation,
-                                      cache->cc_indexoid,
-                                      IndexScanOK(cache, cur_skey),
-                                      NULL,
-                                      nkeys,
-                                      cur_skey);
-
         ct = NULL;
         stale = false;
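
Note on the catcache.c change above: the scankey copy and the systable_beginscan() call now live inside the retry loop because systable_beginscan() scribbles on the caller's scankey, so a retry with the clobbered keys would search for the wrong tuple. Below is a minimal standalone sketch of that pattern, not Cloudberry code; KeyData, scan_begin() and the retry bookkeeping are hypothetical stand-ins.

/*
 * Standalone sketch; KeyData and scan_begin() are hypothetical stand-ins
 * for ScanKeyData and systable_beginscan().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct KeyData
{
    int         argument;
} KeyData;

/* Like systable_beginscan(), this scribbles on the caller's key array. */
static void
scan_begin(KeyData *keys, int nkeys)
{
    for (int i = 0; i < nkeys; i++)
        keys[i].argument = -1;      /* key preprocessing clobbers the input */
}

int
main(void)
{
    const KeyData template_keys[2] = {{42}, {7}};   /* like cache->cc_skey */
    KeyData     cur_keys[2];
    int         attempt = 0;
    bool        stale;

    do
    {
        /* Re-copy the template on every pass, as the patched loop now does. */
        memcpy(cur_keys, template_keys, sizeof(cur_keys));
        scan_begin(cur_keys, 2);

        /* Pretend the first pass found a stale entry and must retry. */
        stale = (++attempt < 2);
    } while (stale);

    /* Both passes scanned with the intended arguments 42 and 7. */
    printf("attempts: %d\n", attempt);
    return 0;
}
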
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index f99d2fc5ce4..3a0d585b7df 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2251,9 +2251,9 @@ RelationDecrementReferenceCount(Relation rel)
         elog(ERROR,
 #endif
              "Relation decrement reference count found relation %u/%u/%u with bad count (reference count %d)",
-             rel->rd_node.spcNode,
-             rel->rd_node.dbNode,
-             rel->rd_node.relNode,
+             rel->rd_locator.spcOid,
+             rel->rd_locator.dbOid,
+             rel->rd_locator.relNumber,
              rel->rd_refcnt);
     }
@@ -3816,13 +3816,13 @@ RelationBuildLocalRelation(const char *relname,
      * manager in Cloudberry breaks if this happens, see GPDB_91_MERGE_FIXME in
      * GetNewRelFileNode() for details.
      */
-    if (relfilenode == 1 || mapped_relation)
+    if (relfilenumber == 1 || mapped_relation)
     {
         if (relid < FirstNormalObjectId)    /* bootstrap only */
-            relfilenode = relid;
+            relfilenumber = relid;
         else
         {
-            relfilenode = GetNewRelFileNode(reltablespace, NULL, relpersistence);
+            relfilenumber = GetNewRelFileNode(reltablespace, NULL, relpersistence);
             if (Gp_role == GP_ROLE_EXECUTE || IsBinaryUpgrade)
                 AdvanceObjectId(relid);
         }
@@ -3908,36 +3908,9 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
     TransactionId freezeXid = InvalidTransactionId;
     RelFileLocator newrlocator;
-    if (!IsBinaryUpgrade)
-    {
-        /* Allocate a new relfilenumber */
-        newrelfilenumber = GetNewRelFileNumber(relation->rd_rel->reltablespace,
-                                               NULL, persistence);
-    }
-    else if (relation->rd_rel->relkind == RELKIND_INDEX)
-    {
-        if (!OidIsValid(binary_upgrade_next_index_pg_class_relfilenumber))
-            ereport(ERROR,
-                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                     errmsg("index relfilenumber value not set when in binary upgrade mode")));
-
-        newrelfilenumber = binary_upgrade_next_index_pg_class_relfilenumber;
-        binary_upgrade_next_index_pg_class_relfilenumber = InvalidOid;
-    }
-    else if (relation->rd_rel->relkind == RELKIND_RELATION)
-    {
-        if (!OidIsValid(binary_upgrade_next_heap_pg_class_relfilenumber))
-            ereport(ERROR,
-                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                     errmsg("heap relfilenumber value not set when in binary upgrade mode")));
-
-        newrelfilenumber = binary_upgrade_next_heap_pg_class_relfilenumber;
-        binary_upgrade_next_heap_pg_class_relfilenumber = InvalidOid;
-    }
-    else
-        ereport(ERROR,
-                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                 errmsg("unexpected request for new relfilenumber in binary upgrade mode")));
+    /* Allocate a new relfilenumber */
+    newrelfilenumber = GetNewRelFileNumber(relation->rd_rel->reltablespace,
+                                           NULL, persistence);
     /*
      * Get a writable copy of the pg_class tuple for the given relation.
@@ -3975,7 +3948,7 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
          * fails at this stage, the new cluster will need to be recreated
          * anyway.
          */
-        srel = smgropen(relation->rd_locator, relation->rd_backend);
+        srel = smgropen(relation->rd_locator, relation->rd_backend, SMGR_MD, relation);
         smgrdounlinkall(&srel, 1, false);
         smgrclose(srel);
     }
@@ -4008,7 +3981,7 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
         /* handle these directly, at least for now */
         SMgrRelation srel;
-        srel = RelationCreateStorage(newrlocator, persistence, true);
+        srel = RelationCreateStorage(newrlocator, persistence, true, SMGR_MD, relation);
         smgrclose(srel);
     }
     else
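
Note on the relcache.c changes above: they follow the upstream rename of RelFileNode to RelFileLocator (spcNode/dbNode/relNode became spcOid/dbOid/relNumber) and the wider smgropen()/RelationCreateStorage() signatures used here, which also take SMGR_MD and the owning relation. The sketch below only illustrates the field renaming behind the elog() hunk; Oid, RelFileNumber and report_bad_refcount() are local stand-ins so the example compiles on its own, and the struct mirrors the upstream layout rather than quoting Cloudberry headers.

/*
 * Standalone sketch of the field renaming: RelFileNode.spcNode/dbNode/relNode
 * became RelFileLocator.spcOid/dbOid/relNumber.  Oid, RelFileNumber and
 * report_bad_refcount() are local stand-ins so this compiles by itself.
 */
#include <stdio.h>

typedef unsigned int Oid;
typedef Oid RelFileNumber;

typedef struct RelFileLocator
{
    Oid         spcOid;         /* tablespace, formerly spcNode */
    Oid         dbOid;          /* database, formerly dbNode */
    RelFileNumber relNumber;    /* relation, formerly relNode */
} RelFileLocator;

/* Same %u/%u/%u formatting the updated elog() call in relcache.c uses. */
static void
report_bad_refcount(const RelFileLocator *locator, int refcnt)
{
    printf("Relation decrement reference count found relation %u/%u/%u "
           "with bad count (reference count %d)\n",
           locator->spcOid, locator->dbOid, locator->relNumber, refcnt);
}

int
main(void)
{
    RelFileLocator locator = {1663, 5, 16384};  /* tablespace, db, relation */

    report_bad_refcount(&locator, -1);
    return 0;
}
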
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index fb59bef6807..13a6f379081 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -275,10 +275,15 @@ static const dshash_parameters srtr_typmod_table_params = {
 /* hashtable for recognizing registered record types */
 static HTAB *RecordCacheHash = NULL;
-/* arrays of info about registered record types, indexed by assigned typmod */
-static TupleDesc *RecordCacheArray = NULL;
-static uint64 *RecordIdentifierArray = NULL;
-static int32 RecordCacheArrayLen = 0;   /* allocated length of above arrays */
+typedef struct RecordCacheArrayEntry
+{
+    uint64      id;
+    TupleDesc   tupdesc;
+} RecordCacheArrayEntry;
+
+/* array of info about registered record types, indexed by assigned typmod */
+static RecordCacheArrayEntry *RecordCacheArray = NULL;
+static int32 RecordCacheArrayLen = 0;   /* allocated length of above array */
 int32       NextRecordTypmod = 0;   /* number of entries used */
 /*
@@ -1926,13 +1931,10 @@ reset_record_cache(void)
     if (RecordCacheArray != NULL)
     {
         Assert(RecordCacheArrayLen != 0);
-        Assert(RecordIdentifierArray != NULL);
         pfree(RecordCacheArray);
-        pfree(RecordIdentifierArray);
         RecordCacheArray = NULL;
-        RecordIdentifierArray = NULL;
         RecordCacheArrayLen = 0;
     }
 }
@@ -2032,7 +2034,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
         ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
     }
-    RecordCacheArray[entDesc->tdtypmod] = entDesc;
+    RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
     /* Assign a unique tupdesc identifier, too. */
     RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
@@ -2496,7 +2498,7 @@ build_tuple_node_list(int start)
     for (; i < NextRecordTypmod; i++)
     {
-        TupleDesc   tmp = RecordCacheArray[i];
+        TupleDesc   tmp = RecordCacheArray[i].tupdesc;
         TupleDescNode *node = palloc0(sizeof(TupleDescNode));
         node->type = T_TupleDescNode;
@@ -2509,7 +2511,7 @@ build_tuple_node_list(int start)
     {
         for (; i < GetSharedNextRecordTypmod(CurrentSession->shared_typmod_registry); i++)
         {
-            TupleDesc   tmp = RecordCacheArray[i];
+            TupleDesc   tmp = RecordCacheArray[i].tupdesc;
             TupleDescNode *node = palloc0(sizeof(TupleDescNode));
             node->type = T_TupleDescNode;
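
Note on the typcache.c changes above: the two parallel arrays (TupleDesc *RecordCacheArray and uint64 *RecordIdentifierArray) collapse into one array of RecordCacheArrayEntry holding both fields, so each typmod maps to a single slot. A minimal standalone sketch of that array-of-structs shape follows; the fake TupleDesc typedef, the calloc-based growth and the main() driver are illustration only, not the backend's allocation or typmod assignment logic.

/*
 * Standalone sketch of the array-of-structs layout; TupleDesc is faked as a
 * string and the growth/driver code is illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef const char *TupleDesc;      /* stand-in for the real TupleDesc */

typedef struct RecordCacheArrayEntry
{
    uint64_t    id;
    TupleDesc   tupdesc;
} RecordCacheArrayEntry;

static RecordCacheArrayEntry *RecordCacheArray = NULL;
static int32_t RecordCacheArrayLen = 0;
static uint64_t tupledesc_id_counter = 0;

int
main(void)
{
    int32_t     typmod = 0;

    /* One array to grow and reset, instead of two parallel ones. */
    RecordCacheArrayLen = 64;
    RecordCacheArray = calloc(RecordCacheArrayLen,
                              sizeof(RecordCacheArrayEntry));

    /* A single slot now carries both the descriptor and its identifier. */
    RecordCacheArray[typmod].tupdesc = "fake tuple descriptor";
    RecordCacheArray[typmod].id = ++tupledesc_id_counter;

    printf("typmod %d -> id %llu, tupdesc \"%s\"\n", typmod,
           (unsigned long long) RecordCacheArray[typmod].id,
           RecordCacheArray[typmod].tupdesc);

    free(RecordCacheArray);
    return 0;
}
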
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]