This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 4d40917a98f Enable initdb
4d40917a98f is described below
commit 4d40917a98f70d10a048a048a852b9287b0344c2
Author: Jinbao Chen <[email protected]>
AuthorDate: Tue Oct 14 22:58:16 2025 +0800
Enable initdb
---
src/backend/access/heap/heapam.c | 2 +-
src/backend/access/transam/xlogrecovery.c | 4 +-
src/backend/catalog/cdb_schema.sql | 44 ++++++++--------
src/backend/catalog/sql_features.txt | 40 +++++++-------
src/backend/catalog/system_views.sql | 70 +++++++++++++-----------
src/backend/cdb/cdbpath.c | 1 +
src/backend/nodes/copyfuncs.c | 88 ++++++++++++++++++++++++++++++-
src/backend/nodes/equalfuncs.c | 72 +++++++++++++++++++++++++
src/backend/nodes/outfast.c | 3 ++
src/backend/nodes/outfuncs.c | 20 +++++++
src/backend/nodes/readfast.c | 6 +++
src/backend/nodes/readfuncs.c | 21 ++++++++
src/backend/utils/cache/catcache.c | 21 ++++----
13 files changed, 306 insertions(+), 86 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 71a6f6c5aae..90380d52eac 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -479,11 +479,11 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
ItemId lpp = PageGetItemId(page, lineoff);
HeapTupleData loctup;
bool valid;
- HeapTupleHeader theader = (HeapTupleHeader) PageGetItem((Page) page, lpp);
if (!ItemIdIsNormal(lpp))
continue;
+ HeapTupleHeader theader = (HeapTupleHeader) PageGetItem((Page) page, lpp);
loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
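
Reviewer's aside, not part of the commit: a minimal sketch of the ordering the heapgetpage() hunk above restores, where PageGetItem() is called only after ItemIdIsNormal() has confirmed the line pointer has storage. The helper name count_normal_tuples is hypothetical.

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "storage/bufpage.h"

    static int
    count_normal_tuples(Page page)
    {
        OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
        OffsetNumber lineoff;
        int          ntuples = 0;

        for (lineoff = FirstOffsetNumber; lineoff <= maxoff; lineoff++)
        {
            ItemId          lpp = PageGetItemId(page, lineoff);
            HeapTupleHeader theader;

            /* Skip unused, redirected, and dead slots before touching the item. */
            if (!ItemIdIsNormal(lpp))
                continue;

            /* Only now is it safe to fetch the tuple header from the page. */
            theader = (HeapTupleHeader) PageGetItem(page, lpp);
            if (HeapTupleHeaderGetNatts(theader) > 0)
                ntuples++;
        }
        return ntuples;
    }
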
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 9ff198ab2ca..a98b5e41a5d 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -4067,9 +4067,9 @@ ReadCheckpointRecord(XLogPrefetcher *xlogprefetcher, XLogRecPtr RecPtr,
(errmsg("invalid xl_info in checkpoint record")));
return NULL;
}
- if (record->xl_tot_len != SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(CheckPoint))
+ if (record->xl_tot_len < SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(CheckPoint))
{
- ereport(LOG,
+ ereport(PANIC,
(errmsg("invalid length of checkpoint
record")));
return NULL;
}
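
For context, a minimal standalone sketch (hypothetical helper name, not the commit's code) of the relaxed validation the hunk above switches to: the CheckPoint payload size is treated as a lower bound rather than an exact length.

    #include "postgres.h"

    #include "access/xlogrecord.h"
    #include "catalog/pg_control.h"

    /* Accept any checkpoint record large enough to hold a CheckPoint struct. */
    static bool
    checkpoint_record_len_ok(const XLogRecord *record)
    {
        uint32 min_len = SizeOfXLogRecord +
                         SizeOfXLogRecordDataHeaderShort +
                         sizeof(CheckPoint);

        return record->xl_tot_len >= min_len;
    }
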
diff --git a/src/backend/catalog/cdb_schema.sql b/src/backend/catalog/cdb_schema.sql
index 180895513e8..505ae2166f1 100644
--- a/src/backend/catalog/cdb_schema.sql
+++ b/src/backend/catalog/cdb_schema.sql
@@ -46,29 +46,29 @@ GRANT SELECT ON pg_catalog.gp_distributed_log TO PUBLIC;
-- pg_tablespace_location wrapper functions to see Greenplum cluster-wide tablespace locations
-CREATE FUNCTION gp_tablespace_segment_location (IN tblspc_oid oid, OUT gp_segment_id int, OUT tblspc_loc text)
-RETURNS SETOF RECORD AS
-$$
-DECLARE
- seg_id int;
-BEGIN
- EXECUTE 'select pg_catalog.gp_execution_segment()' INTO seg_id;
- -- check if execute in entrydb QE to prevent giving wrong results
- IF seg_id = -1 THEN
- RAISE EXCEPTION 'Cannot execute in entrydb, this query is not currently supported by GPDB.';
- END IF;
- RETURN QUERY SELECT pg_catalog.gp_execution_segment() as gp_segment_id, *
- FROM pg_catalog.pg_tablespace_location($1);
-END;
-$$ LANGUAGE plpgsql EXECUTE ON ALL SEGMENTS;
+-- CREATE FUNCTION gp_tablespace_segment_location (IN tblspc_oid oid, OUT gp_segment_id int, OUT tblspc_loc text)
+-- RETURNS SETOF RECORD AS
+-- $$
+-- DECLARE
+-- seg_id int;
+-- BEGIN
+-- EXECUTE 'select pg_catalog.gp_execution_segment()' INTO seg_id;
+-- -- check if execute in entrydb QE to prevent giving wrong results
+-- IF seg_id = -1 THEN
+-- RAISE EXCEPTION 'Cannot execute in entrydb, this query is not currently supported by GPDB.';
+-- END IF;
+-- RETURN QUERY SELECT pg_catalog.gp_execution_segment() as gp_segment_id, *
+-- FROM pg_catalog.pg_tablespace_location($1);
+-- END;
+-- $$ LANGUAGE plpgsql EXECUTE ON ALL SEGMENTS;
-CREATE FUNCTION gp_tablespace_location (IN tblspc_oid oid, OUT gp_segment_id int, OUT tblspc_loc text)
-RETURNS SETOF RECORD
-AS
- 'SELECT * FROM pg_catalog.gp_tablespace_segment_location($1)
- UNION ALL
- SELECT pg_catalog.gp_execution_segment() as gp_segment_id, * FROM pg_catalog.pg_tablespace_location($1)'
-LANGUAGE SQL EXECUTE ON COORDINATOR;
+-- CREATE FUNCTION gp_tablespace_location (IN tblspc_oid oid, OUT gp_segment_id int, OUT tblspc_loc text)
+-- RETURNS SETOF RECORD
+-- AS
+-- 'SELECT * FROM pg_catalog.gp_tablespace_segment_location($1)
+-- UNION ALL
+-- SELECT pg_catalog.gp_execution_segment() as gp_segment_id, * FROM pg_catalog.pg_tablespace_location($1)'
+-- LANGUAGE SQL EXECUTE ON COORDINATOR;
RESET log_min_messages;
diff --git a/src/backend/catalog/sql_features.txt b/src/backend/catalog/sql_features.txt
index 5b19bcf950e..396b81cd218 100644
--- a/src/backend/catalog/sql_features.txt
+++ b/src/backend/catalog/sql_features.txt
@@ -192,12 +192,12 @@ F052 Intervals and datetime arithmetic YES
F053 OVERLAPS predicate YES
F054 TIMESTAMP in DATE type precedence list NO
F081 UNION and EXCEPT in views YES
-F111 Isolation levels other than SERIALIZABLE YES
-F112 Isolation level READ UNCOMMITTED NO
+F111 Isolation levels other than SERIALIZABLE YES
+F112 Isolation level READ UNCOMMITTED NO
F113 Isolation level READ COMMITTED YES
-F114 Isolation level REPEATABLE READ NO
+F114 Isolation level REPEATABLE READ NO
F120 Get diagnostics statement NO
-F121 Basic diagnostics management NO
+F121 Basic diagnostics management NO
F122 Enhanced diagnostics management NO
F123 All diagnostics NO
F124 SET TRANSACTION statement: DIAGNOSTICS SIZE clause NO
@@ -265,16 +265,16 @@ F405 NATURAL JOIN YES
F406 FULL OUTER JOIN YES
F407 CROSS JOIN YES
F411 Time zone specification YES differences regarding literal interpretation
-F421 National character YES
+F421 National character YES
F431 Read-only scrollable cursors YES
F432 FETCH with explicit NEXT YES
-F433 FETCH FIRST NO
+F433 FETCH FIRST NO
F434 FETCH LAST YES
-F435 FETCH PRIOR NO
-F436 FETCH ABSOLUTE NO
-F437 FETCH RELATIVE NO
+F435 FETCH PRIOR NO
+F436 FETCH ABSOLUTE NO
+F437 FETCH RELATIVE NO
F438 Scrollable cursors YES
-F441 Extended set function support YES
+F441 Extended set function support YES
F442 Mixed column references in set functions YES
F451 Character set definition NO
F461 Named character sets NO
@@ -448,20 +448,20 @@ T176 Sequence generator support NO supported except for NEXT VALUE FOR
T177 Sequence generator support: simple restart option YES
T178 Identity columns: simple restart option YES
T180 System-versioned tables NO
-T181 Application-time period tables NO
-T191 Referential action RESTRICT NO
+T181 Application-time period tables NO
+T191 Referential action RESTRICT NO
T200 Trigger DDL NO similar but not fully compatible
-T201 Comparable data types for referential constraints NO
+T201 Comparable data types for referential constraints NO
T211 Basic trigger capability NO
-T212 Enhanced trigger capability NO
-T213 INSTEAD OF triggers NO
-T214 BEFORE triggers NO
-T215 AFTER triggers NO
-T216 Ability to require true search condition before trigger is invoked NO
+T212 Enhanced trigger capability NO
+T213 INSTEAD OF triggers NO
+T214 BEFORE triggers NO
+T215 AFTER triggers NO
+T216 Ability to require true search condition before trigger is invoked NO
T217 TRIGGER privilege YES
T218 Multiple triggers for the same event executed in the order created NO intentionally omitted
-T231 Sensitive cursors YES
-T241 START TRANSACTION statement YES
+T231 Sensitive cursors YES
+T241 START TRANSACTION statement YES
T251 SET TRANSACTION statement: LOCAL option NO
T261 Chained transactions YES
T262 Multiple server transactions NO
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 0e0802dbb1f..8858c3ebcd5 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -718,13 +718,16 @@ SELECT
s.schemaname,
s.relname,
m.seq_scan,
+ m.last_seq_scan,
m.seq_tup_read,
m.idx_scan,
+ m.last_idx_scan,
m.idx_tup_fetch,
m.n_tup_ins,
m.n_tup_upd,
m.n_tup_del,
m.n_tup_hot_upd,
+ m.n_tup_newpage_upd,
m.n_live_tup,
m.n_dead_tup,
m.n_mod_since_analyze,
@@ -743,13 +746,16 @@ FROM
allt.schemaname,
allt.relname,
case when d.policytype = 'r' then (sum(seq_scan)/d.numsegments)::bigint else sum(seq_scan) end seq_scan,
+ max(last_seq_scan) as last_seq_scan,
case when d.policytype = 'r' then (sum(seq_tup_read)/d.numsegments)::bigint else sum(seq_tup_read) end seq_tup_read,
case when d.policytype = 'r' then (sum(idx_scan)/d.numsegments)::bigint else sum(idx_scan) end idx_scan,
+ max(last_idx_scan) as last_idx_scan,
case when d.policytype = 'r' then (sum(idx_tup_fetch)/d.numsegments)::bigint else sum(idx_tup_fetch) end idx_tup_fetch,
case when d.policytype = 'r' then (sum(n_tup_ins)/d.numsegments)::bigint else sum(n_tup_ins) end n_tup_ins,
case when d.policytype = 'r' then (sum(n_tup_upd)/d.numsegments)::bigint else sum(n_tup_upd) end n_tup_upd,
case when d.policytype = 'r' then (sum(n_tup_del)/d.numsegments)::bigint else sum(n_tup_del) end n_tup_del,
case when d.policytype = 'r' then (sum(n_tup_hot_upd)/d.numsegments)::bigint else sum(n_tup_hot_upd) end n_tup_hot_upd,
+ max(n_tup_newpage_upd) as n_tup_newpage_upd,
case when d.policytype = 'r' then (sum(n_live_tup)/d.numsegments)::bigint else sum(n_live_tup) end n_live_tup,
case when d.policytype = 'r' then (sum(n_dead_tup)/d.numsegments)::bigint else sum(n_dead_tup) end n_dead_tup,
case when d.policytype = 'r' then (sum(n_mod_since_analyze)/d.numsegments)::bigint else sum(n_mod_since_analyze) end n_mod_since_analyze,
@@ -844,25 +850,38 @@ CREATE VIEW pg_stat_xact_user_tables AS
schemaname !~ '^pg_toast';
CREATE VIEW pg_statio_all_tables AS
- SELECT
- C.oid AS relid,
- N.nspname AS schemaname,
- C.relname AS relname,
- pg_stat_get_blocks_fetched(C.oid) -
- pg_stat_get_blocks_hit(C.oid) AS heap_blks_read,
- pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit,
- I.idx_blks_read AS idx_blks_read,
- I.idx_blks_hit AS idx_blks_hit,
- pg_stat_get_blocks_fetched(T.oid) -
- pg_stat_get_blocks_hit(T.oid) AS toast_blks_read,
- pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit,
- X.idx_blks_read AS tidx_blks_read,
- X.idx_blks_hit AS tidx_blks_hit
- FROM pg_class C LEFT JOIN
- pg_class T ON C.reltoastrelid = T.oid
- LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
- WHERE C.relkind IN ('r', 't', 'm', 'o', 'b', 'M')
- GROUP BY C.oid, N.nspname, C.relname, T.oid, X.indexrelid;
+SELECT
+ C.oid AS relid,
+ N.nspname AS schemaname,
+ C.relname AS relname,
+ pg_stat_get_blocks_fetched(C.oid) -
+ pg_stat_get_blocks_hit(C.oid) AS heap_blks_read,
+ pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit,
+ I.idx_blks_read AS idx_blks_read,
+ I.idx_blks_hit AS idx_blks_hit,
+ pg_stat_get_blocks_fetched(T.oid) -
+ pg_stat_get_blocks_hit(T.oid) AS toast_blks_read,
+ pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit,
+ X.idx_blks_read AS tidx_blks_read,
+ X.idx_blks_hit AS tidx_blks_hit
+FROM pg_class C LEFT JOIN
+ pg_class T ON C.reltoastrelid = T.oid
+ LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+ LEFT JOIN LATERAL (
+ SELECT sum(pg_stat_get_blocks_fetched(indexrelid) -
+ pg_stat_get_blocks_hit(indexrelid))::bigint
+ AS idx_blks_read,
+ sum(pg_stat_get_blocks_hit(indexrelid))::bigint
+ AS idx_blks_hit
+ FROM pg_index WHERE indrelid = C.oid ) I ON true
+ LEFT JOIN LATERAL (
+ SELECT sum(pg_stat_get_blocks_fetched(indexrelid) -
+ pg_stat_get_blocks_hit(indexrelid))::bigint
+ AS idx_blks_read,
+ sum(pg_stat_get_blocks_hit(indexrelid))::bigint
+ AS idx_blks_hit
+ FROM pg_index WHERE indrelid = T.oid ) X ON true
+WHERE C.relkind IN ('r', 't', 'm', 'o', 'b', 'M');
CREATE VIEW pg_statio_sys_tables AS
SELECT * FROM pg_statio_all_tables
@@ -901,6 +920,7 @@ SELECT
s.relname,
s.indexrelname,
m.idx_scan,
+ m.last_idx_scan,
m.idx_tup_read,
m.idx_tup_fetch
FROM
@@ -911,6 +931,7 @@ FROM
relname,
indexrelname,
sum(idx_scan) as idx_scan,
+ max(last_idx_scan) as last_idx_scan,
sum(idx_tup_read) as idx_tup_read,
sum(idx_tup_fetch) as idx_tup_fetch
FROM
@@ -1317,17 +1338,6 @@ CREATE VIEW pg_stat_database AS
SELECT oid, datname FROM pg_database
) D;
-CREATE VIEW pg_stat_resqueues AS
- SELECT
- pg_catalog.gp_execution_segment() AS gp_segment_id,
- Q.oid AS queueid,
- Q.rsqname AS queuename,
- pg_stat_get_queue_num_exec(Q.oid) AS n_queries_exec,
- pg_stat_get_queue_num_wait(Q.oid) AS n_queries_wait,
- pg_stat_get_queue_elapsed_exec(Q.oid) AS elapsed_exec,
- pg_stat_get_queue_elapsed_wait(Q.oid) AS elapsed_wait
- FROM pg_resqueue AS Q;
-
-- Resource queue views
CREATE VIEW pg_resqueue_status AS
diff --git a/src/backend/cdb/cdbpath.c b/src/backend/cdb/cdbpath.c
index 4aef9a94280..0a3b437d696 100644
--- a/src/backend/cdb/cdbpath.c
+++ b/src/backend/cdb/cdbpath.c
@@ -1464,6 +1464,7 @@ cdbpath_motion_for_join(PlannerInfo *root,
case JOIN_ANTI:
case JOIN_LEFT:
case JOIN_LASJ_NOTIN:
+ case JOIN_RIGHT_ANTI:
outer.ok_to_replicate = false;
break;
case JOIN_RIGHT:
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 5f804090cbb..04fe72e2e86 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -3828,7 +3828,7 @@ _copyTriggerTransition(const TriggerTransition *from)
static Query *
_copyQuery(const Query *from)
{
- Query *newnode = makeNode(Query);
+ Query *newnode = makeNode(Query);
COPY_SCALAR_FIELD(commandType);
COPY_SCALAR_FIELD(querySource);
@@ -3851,7 +3851,10 @@ _copyQuery(const Query *from)
COPY_SCALAR_FIELD(isReturn);
COPY_NODE_FIELD(cteList);
COPY_NODE_FIELD(rtable);
+ COPY_NODE_FIELD(rteperminfos);
COPY_NODE_FIELD(jointree);
+ COPY_NODE_FIELD(mergeActionList);
+ COPY_SCALAR_FIELD(mergeUseOuterJoin);
COPY_NODE_FIELD(targetList);
COPY_SCALAR_FIELD(override);
COPY_NODE_FIELD(onConflict);
@@ -6391,6 +6394,74 @@ _copyEphemeralNamedRelationInfo(const EphemeralNamedRelationInfo *from)
return newnode;
}
+
+static Integer *
+_copyInteger(const Integer *from)
+{
+ Integer *newnode = makeNode(Integer);
+
+ COPY_SCALAR_FIELD(ival);
+
+ return newnode;
+}
+
+static Float *
+_copyFloat(const Float *from)
+{
+ Float *newnode = makeNode(Float);
+
+ COPY_STRING_FIELD(fval);
+
+ return newnode;
+}
+
+static Boolean *
+_copyBoolean(const Boolean *from)
+{
+ Boolean *newnode = makeNode(Boolean);
+
+ COPY_SCALAR_FIELD(boolval);
+
+ return newnode;
+}
+
+static String *
+_copyString(const String *from)
+{
+ String *newnode = makeNode(String);
+
+ COPY_STRING_FIELD(sval);
+
+ return newnode;
+}
+
+static BitString *
+_copyBitString(const BitString *from)
+{
+ BitString *newnode = makeNode(BitString);
+
+ COPY_STRING_FIELD(bsval);
+
+ return newnode;
+}
+
+
+static RTEPermissionInfo *
+_copyRTEPermissionInfo(const RTEPermissionInfo *from)
+{
+ RTEPermissionInfo *newnode = makeNode(RTEPermissionInfo);
+
+ COPY_SCALAR_FIELD(relid);
+ COPY_SCALAR_FIELD(inh);
+ COPY_SCALAR_FIELD(requiredPerms);
+ COPY_SCALAR_FIELD(checkAsUser);
+ COPY_BITMAPSET_FIELD(selectedCols);
+ COPY_BITMAPSET_FIELD(insertedCols);
+ COPY_BITMAPSET_FIELD(updatedCols);
+
+ return newnode;
+}
+
/*
* copyObjectImpl -- implementation of copyObject(); see nodes/nodes.h
*
@@ -6837,9 +6908,20 @@ copyObjectImpl(const void *from)
* VALUE NODES
*/
case T_Integer:
+ retval = _copyInteger(from);
+ break;
case T_Float:
+ retval = _copyFloat(from);
+ break;
+ case T_Boolean:
+ retval = _copyBoolean(from);
+ break;
case T_String:
+ retval = _copyString(from);
+ break;
case T_BitString:
+ retval = _copyBitString(from);
+ break;
case T_Null:
retval = _copyValue(from);
break;
@@ -7571,6 +7653,10 @@ copyObjectImpl(const void *from)
case T_EphemeralNamedRelationInfo:
retval = _copyEphemeralNamedRelationInfo(from);
break;
+
+ case T_RTEPermissionInfo:
+ retval = _copyRTEPermissionInfo(from);
+ break;
default:
elog(ERROR, "unrecognized node type: %d", (int)
nodeTag(from));
retval = 0; /* keep compiler quiet
*/
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index bb7b2b0d89a..19027a82f6f 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1054,7 +1054,10 @@ _equalQuery(const Query *a, const Query *b)
COMPARE_SCALAR_FIELD(isReturn);
COMPARE_NODE_FIELD(cteList);
COMPARE_NODE_FIELD(rtable);
+ COMPARE_NODE_FIELD(rteperminfos);
COMPARE_NODE_FIELD(jointree);
+ COMPARE_NODE_FIELD(mergeActionList);
+ COMPARE_SCALAR_FIELD(mergeUseOuterJoin);
COMPARE_NODE_FIELD(targetList);
COMPARE_SCALAR_FIELD(override);
COMPARE_NODE_FIELD(onConflict);
@@ -3622,6 +3625,61 @@ _equalValue(const Value *a, const Value *b)
return true;
}
+
+static bool
+_equalInteger(const Integer *a, const Integer *b)
+{
+ COMPARE_SCALAR_FIELD(ival);
+
+ return true;
+}
+
+static bool
+_equalFloat(const Float *a, const Float *b)
+{
+ COMPARE_STRING_FIELD(fval);
+
+ return true;
+}
+
+static bool
+_equalBoolean(const Boolean *a, const Boolean *b)
+{
+ COMPARE_SCALAR_FIELD(boolval);
+
+ return true;
+}
+
+static bool
+_equalString(const String *a, const String *b)
+{
+ COMPARE_STRING_FIELD(sval);
+
+ return true;
+}
+
+static bool
+_equalBitString(const BitString *a, const BitString *b)
+{
+ COMPARE_STRING_FIELD(bsval);
+
+ return true;
+}
+
+static bool
+_equalRTEPermissionInfo(const RTEPermissionInfo *a, const RTEPermissionInfo *b)
+{
+ COMPARE_SCALAR_FIELD(relid);
+ COMPARE_SCALAR_FIELD(inh);
+ COMPARE_SCALAR_FIELD(requiredPerms);
+ COMPARE_SCALAR_FIELD(checkAsUser);
+ COMPARE_BITMAPSET_FIELD(selectedCols);
+ COMPARE_BITMAPSET_FIELD(insertedCols);
+ COMPARE_BITMAPSET_FIELD(updatedCols);
+
+ return true;
+}
+
/*
* equal
* returns whether two nodes are equal
@@ -3846,9 +3904,20 @@ equal(const void *a, const void *b)
break;
case T_Integer:
+ retval = _equalInteger(a, b);
+ break;
case T_Float:
+ retval = _equalFloat(a, b);
+ break;
+ case T_Boolean:
+ retval = _equalBoolean(a, b);
+ break;
case T_String:
+ retval = _equalString(a, b);
+ break;
case T_BitString:
+ retval = _equalBitString(a, b);
+ break;
case T_Null:
retval = _equalValue(a, b);
break;
@@ -4502,6 +4571,9 @@ equal(const void *a, const void *b)
case T_DropTaskStmt:
retval = _equalDropTaskStmt(a, b);
break;
+ case T_RTEPermissionInfo:
+ retval = _equalRTEPermissionInfo(a, b);
+ break;
default:
elog(ERROR, "unrecognized node type: %d",
diff --git a/src/backend/nodes/outfast.c b/src/backend/nodes/outfast.c
index bfc244b904a..2d1d846a72f 100644
--- a/src/backend/nodes/outfast.c
+++ b/src/backend/nodes/outfast.c
@@ -1966,6 +1966,9 @@ _outNode(StringInfo str, void *obj)
case T_AlterDatabaseStmt:
_outAlterDatabaseStmt(str, obj);
break;
+ case T_RTEPermissionInfo:
+ _outRTEPermissionInfo(str, obj);
+ break;
default:
elog(ERROR, "could not serialize unrecognized
node type: %d",
(int) nodeTag(obj));
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 51fd5f00df4..7bd3533bc34 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -3534,7 +3534,10 @@ _outQuery(StringInfo str, const Query *node)
WRITE_BOOL_FIELD(isReturn);
WRITE_NODE_FIELD(cteList);
WRITE_NODE_FIELD(rtable);
+ WRITE_NODE_FIELD(rteperminfos);
WRITE_NODE_FIELD(jointree);
+ WRITE_NODE_FIELD(mergeActionList);
+ WRITE_BOOL_FIELD(mergeUseOuterJoin);
WRITE_NODE_FIELD(targetList);
WRITE_ENUM_FIELD(override, OverridingKind);
WRITE_NODE_FIELD(onConflict);
@@ -4271,6 +4274,20 @@ _outDropTaskStmt(StringInfo str, const DropTaskStmt *node)
WRITE_BOOL_FIELD(missing_ok);
}
+static void
+_outRTEPermissionInfo(StringInfo str, const RTEPermissionInfo *node)
+{
+ WRITE_NODE_TYPE("RTEPERMISSIONINFO");
+
+ WRITE_OID_FIELD(relid);
+ WRITE_BOOL_FIELD(inh);
+ WRITE_UINT64_FIELD(requiredPerms);
+ WRITE_OID_FIELD(checkAsUser);
+ WRITE_BITMAPSET_FIELD(selectedCols);
+ WRITE_BITMAPSET_FIELD(insertedCols);
+ WRITE_BITMAPSET_FIELD(updatedCols);
+}
+
#include "outfuncs_common.c"
#ifndef COMPILING_BINARY_FUNCS
/*
@@ -5464,6 +5481,9 @@ outNode(StringInfo str, const void *obj)
case T_DropTaskStmt:
_outDropTaskStmt(str, obj);
break;
+ case T_RTEPermissionInfo:
+ _outRTEPermissionInfo(str, obj);
+ break;
default:
/*
diff --git a/src/backend/nodes/readfast.c b/src/backend/nodes/readfast.c
index 2e344796aa4..1ae6fea97c9 100644
--- a/src/backend/nodes/readfast.c
+++ b/src/backend/nodes/readfast.c
@@ -268,7 +268,10 @@ _readQuery(void)
READ_BOOL_FIELD(canOptSelectLockingClause);
READ_NODE_FIELD(cteList);
READ_NODE_FIELD(rtable);
+ READ_NODE_FIELD(rteperminfos);
READ_NODE_FIELD(jointree);
+ READ_NODE_FIELD(mergeActionList);
+ READ_BOOL_FIELD(mergeUseOuterJoin);
READ_NODE_FIELD(targetList);
READ_NODE_FIELD(withCheckOptions);
READ_NODE_FIELD(onConflict);
@@ -2963,6 +2966,9 @@ readNodeBinary(void)
case T_DropTaskStmt:
return_value = _readDropTaskStmt();
break;
+ case T_RTEPermissionInfo:
+ return_value = _readRTEPermissionInfo();
+ break;
default:
return_value = NULL; /* keep the compiler silent */
elog(ERROR, "could not deserialize unrecognized node type: %d",
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 5d9ae29a830..5ac2765382a 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -297,7 +297,10 @@ _readQuery(void)
READ_BOOL_FIELD(isReturn);
READ_NODE_FIELD(cteList);
READ_NODE_FIELD(rtable);
+ READ_NODE_FIELD(rteperminfos);
READ_NODE_FIELD(jointree);
+ READ_NODE_FIELD(mergeActionList);
+ READ_BOOL_FIELD(mergeUseOuterJoin);
READ_NODE_FIELD(targetList);
READ_ENUM_FIELD(override, OverridingKind);
READ_NODE_FIELD(onConflict);
@@ -2897,6 +2900,22 @@ _readPartitionRangeDatum(void)
READ_DONE();
}
+static RTEPermissionInfo *
+_readRTEPermissionInfo(void)
+{
+ READ_LOCALS(RTEPermissionInfo);
+
+ READ_OID_FIELD(relid);
+ READ_BOOL_FIELD(inh);
+ READ_UINT64_FIELD(requiredPerms);
+ READ_OID_FIELD(checkAsUser);
+ READ_BITMAPSET_FIELD(selectedCols);
+ READ_BITMAPSET_FIELD(insertedCols);
+ READ_BITMAPSET_FIELD(updatedCols);
+
+ READ_DONE();
+}
+
#include "readfuncs_common.c"
#ifndef COMPILING_BINARY_FUNCS
/*
@@ -3414,6 +3433,8 @@ parseNodeString(void)
return_value = _readReturnStmt();
else if (MATCHX("DROPDIRECTORYTABLESTMT"))
return_value = _readDropDirectoryTableStmt();
+ else if (MATCHX("RTEPERMISSIONINFO"))
+ return_value = _readRTEPermissionInfo();
else
{
ereport(ERROR,
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 36e6ea76022..bffd49868c4 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1537,22 +1537,23 @@ SearchCatCacheMiss(CatCache *cache,
nkeys,
cur_skey);
- /*
- * Good place to sanity check the tuple, before adding it to cache.
- * So if its fetched using index, lets cross verify tuple intended is the tuple
- * fetched. If not fail and contain the damage which maybe caused due to
- * index corruption for some reason.
- */
- if (scandesc->irel)
- {
- CrossCheckTuple(cache->id, v1, v2, v3, v4, ntp);
- }
ct = NULL;
stale = false;
while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
{
+ /*
+ * Good place to sanity check the tuple, before adding it to cache.
+ * So if its fetched using index, lets cross verify tuple intended is the tuple
+ * fetched. If not fail and contain the damage which maybe caused due to
+ * index corruption for some reason.
+ */
+ if (scandesc->irel)
+ {
+ CrossCheckTuple(cache->id, v1, v2, v3, v4, ntp);
+ }
+
ct = CatalogCacheCreateEntry(cache, ntp, NULL,
hashValue, hashIndex);
/* upon failure, we must start the scan over */
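
As a reviewer's aside, a toy, self-contained sketch (all names hypothetical, not backend code) of the pattern this catcache.c hunk moves to: cross-check every row inside the fetch loop, right before it becomes a cache entry, instead of running a single check before the loop starts.

    #include <stdio.h>

    typedef struct Row { int key; int value; } Row;

    /* Stand-in for CrossCheckTuple(): does the fetched row match the lookup key? */
    static int
    row_matches_key(const Row *row, int wanted_key)
    {
        return row->key == wanted_key;
    }

    int
    main(void)
    {
        Row    fetched[] = {{42, 1}, {42, 2}, {99, 3}};  /* 99 mimics a bad index hit */
        int    wanted = 42;
        int    cached = 0;
        size_t i;

        for (i = 0; i < sizeof(fetched) / sizeof(fetched[0]); i++)
        {
            /* Validate each row as it is fetched, before caching it. */
            if (!row_matches_key(&fetched[i], wanted))
            {
                fprintf(stderr, "cross-check failed for fetched row %zu\n", i);
                continue;
            }
            cached++;                /* stand-in for creating the cache entry */
        }
        printf("cached %d of %zu fetched rows\n", cached,
               sizeof(fetched) / sizeof(fetched[0]));
        return 0;
    }
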