From 6cb6c402d894cdbe44701d97cd644e4a4fd2cf75 Mon Sep 17 00:00:00 2001
From: Amit Langote <amitlan@postgresql.org>
Date: Wed, 18 Feb 2026 23:37:19 +0900
Subject: [PATCH v3 2/2] Cache per-batch resources for fast-path foreign key
 checks

The fast-path FK check introduced in the previous commit opens and
closes the PK relation, index, scan descriptor, and tuple slot on
every trigger invocation.  For bulk operations that fire thousands of
FK triggers in a single statement, this repeated setup/teardown
dominates the cost.

Introduce RI_FastPathEntry, a per-constraint hash table that caches
the open Relation (pk_rel, idx_rel), IndexScanDesc, TupleTableSlot,
and a registered Snapshot across all trigger invocations within a
single trigger-firing batch.  Entries are created lazily on first use
via ri_FastPathGetEntry() and persist until the batch ends.

The snapshot is registered once at entry creation.  On subsequent
rows, only CommandCounterIncrement() + a direct curcid patch on the
registered copy is needed, avoiding the per-row GetSnapshotData()
cost (which takes ProcArrayLock and iterates all backend slots).
SnapshotSetCommandId() only patches the process-global statics, not
registered copies, so we patch entry->snapshot->curcid ourselves.

Lifecycle management:

  - AfterTriggerBatchCallback: A new general-purpose callback
    mechanism in trigger.c.  Callbacks registered via
    RegisterAfterTriggerBatchCallback() fire at the end of each
    trigger-firing batch (AfterTriggerEndQuery for immediate
    constraints, AfterTriggerFireDeferred at COMMIT, and
    AfterTriggerSetState for SET CONSTRAINTS IMMEDIATE).  The RI code
    registers ri_FastPathCleanup as a batch callback, which does
    orderly teardown: index_endscan, index_close, table_close,
    ExecDropSingleTupleTableSlot, UnregisterSnapshot.

  - XactCallback: ri_FastPathXactCallback NULLs the static cache
    pointer at transaction end.  On the normal path, cleanup already
    ran via the batch callback; this handles the abort path, where
    TopTransactionContext destruction frees the memory and
    ResourceOwner handles the actual resource cleanup.

  - SubXactCallback: ri_FastPathSubXactCallback NULLs the static
    cache pointer on subtransaction abort.  ResourceOwner already
    cleaned up the resources; this prevents the batch callback from
    trying to double-close them.

  - AfterTriggerBatchIsActive(): Exported accessor that returns true
    when afterTriggers.query_depth >= 0.  During ALTER TABLE ... ADD
    FOREIGN KEY validation, RI triggers are called directly outside
    the after-trigger framework, so batch callbacks would never fire.
    The fast-path code uses this to fall back to a non-cached
    per-invocation path (open/scan/close each call) in that context.
---
 src/backend/commands/trigger.c            |  84 +++++++
 src/backend/utils/adt/ri_triggers.c       | 253 ++++++++++++++++++++--
 src/include/commands/trigger.h            |  18 ++
 src/test/regress/expected/foreign_key.out |  47 ++++
 src/test/regress/sql/foreign_key.sql      |  44 ++++
 src/tools/pgindent/typedefs.list          |   3 +
 6 files changed, 435 insertions(+), 14 deletions(-)

diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 8df915f63fb..7adeae5c7e5 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -3891,6 +3891,8 @@ typedef struct AfterTriggersData
 	/* per-subtransaction-level data: */
 	AfterTriggersTransData *trans_stack;	/* array of structs shown below */
 	int			maxtransdepth;	/* allocated len of above array */
+
+	List	   *batch_callbacks;	/* List of AfterTriggerCallbackItem */
 } AfterTriggersData;
 
 struct AfterTriggersQueryData
@@ -3927,6 +3929,13 @@ struct AfterTriggersTableData
 	TupleTableSlot *storeslot;	/* for converting to tuplestore's format */
 };
 
+/* Entry in afterTriggers.batch_callbacks */
+typedef struct AfterTriggerCallbackItem
+{
+	AfterTriggerBatchCallback callback;
+	void	   *arg;
+} AfterTriggerCallbackItem;
+
 static AfterTriggersData afterTriggers;
 
 static void AfterTriggerExecute(EState *estate,
@@ -3962,6 +3971,7 @@ static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
 													Oid tgoid, bool tgisdeferred);
 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
 
+static void FireAfterTriggerBatchCallbacks(void);
 
 /*
  * Get the FDW tuplestore for the current trigger query level, creating it
@@ -5087,6 +5097,7 @@ AfterTriggerBeginXact(void)
 	 */
 	afterTriggers.firing_counter = (CommandId) 1;	/* mustn't be 0 */
 	afterTriggers.query_depth = -1;
+	afterTriggers.batch_callbacks = NIL;
 
 	/*
 	 * Verify that there is no leftover state remaining.  If these assertions
@@ -5208,6 +5219,8 @@ AfterTriggerEndQuery(EState *estate)
 			break;
 	}
 
+	FireAfterTriggerBatchCallbacks();
+
 	/* Release query-level-local storage, including tuplestores if any */
 	AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
 
@@ -5315,6 +5328,8 @@ AfterTriggerFireDeferred(void)
 			break;				/* all fired */
 	}
 
+	FireAfterTriggerBatchCallbacks();
+
 	/*
 	 * We don't bother freeing the event list, since it will go away anyway
 	 * (and more efficiently than via pfree) in AfterTriggerEndXact.
@@ -6057,6 +6072,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
 				break;			/* all fired */
 		}
 
+		FireAfterTriggerBatchCallbacks();
+
 		if (snapshot_set)
 			PopActiveSnapshot();
 	}
@@ -6753,3 +6770,70 @@ check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple)
 
 	return tuple;
 }
+
+/*
+ * RegisterAfterTriggerBatchCallback
+ *		Register a function to be called when the current trigger-firing
+ *		batch completes.
+ *
+ * Must be called from within a trigger function's execution context
+ * (i.e., while afterTriggers state is active).
+ *
+ * The callback list is cleared after invocation, so the caller must
+ * re-register for each new batch if needed.
+ */
+void
+RegisterAfterTriggerBatchCallback(AfterTriggerBatchCallback callback,
+								  void *arg)
+{
+	AfterTriggerCallbackItem *item;
+	MemoryContext oldcxt;
+
+	/*
+	 * Allocate in TopTransactionContext so the item survives for the duration
+	 * of the batch, which may span multiple trigger invocations.
+	 */
+	oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+	item = palloc(sizeof(AfterTriggerCallbackItem));
+	item->callback = callback;
+	item->arg = arg;
+	afterTriggers.batch_callbacks =
+		lappend(afterTriggers.batch_callbacks, item);
+	MemoryContextSwitchTo(oldcxt);
+}
+
+/*
+ * FireAfterTriggerBatchCallbacks
+ *		Invoke and clear all registered batch callbacks.
+ *
+ * Called at the end of each trigger-firing batch.
+ */
+static void
+FireAfterTriggerBatchCallbacks(void)
+{
+	ListCell   *lc;
+
+	foreach(lc, afterTriggers.batch_callbacks)
+	{
+		AfterTriggerCallbackItem *item = lfirst(lc);
+
+		item->callback(item->arg);
+	}
+
+	list_free_deep(afterTriggers.batch_callbacks);
+	afterTriggers.batch_callbacks = NIL;
+}
+
+/*
+ * AfterTriggerBatchIsActive
+ *		Returns true if we're inside a query-level trigger batch where
+ *		registered batch callbacks will actually be invoked.
+ *
+ * This is false during validateForeignKeyConstraint(), which calls
+ * RI trigger functions directly outside the after-trigger framework.
+ */
+bool
+AfterTriggerBatchIsActive(void)
+{
+	return afterTriggers.query_depth >= 0;
+}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 45cc742fa19..2dbb33f436d 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -190,6 +190,23 @@ typedef struct RI_CompareHashEntry
 	FmgrInfo	cast_func_finfo;	/* in case we must coerce input */
 } RI_CompareHashEntry;
 
+/*
+ * RI_FastPathEntry
+ *		Per-constraint cache of resources needed by ri_FastPathCheck().
+ *
+ * One entry per constraint, keyed by pg_constraint OID.  Created lazily
+ * by ri_FastPathGetEntry() on first use within a trigger-firing batch
+ * and torn down by ri_FastPathCleanup() at batch end.
+ */
+typedef struct RI_FastPathEntry
+{
+	Oid			conoid;			/* hash key: pg_constraint OID */
+	Relation	pk_rel;
+	Relation	idx_rel;
+	IndexScanDesc scandesc;
+	TupleTableSlot *slot;
+	Snapshot	snapshot;		/* registered snapshot for the scan */
+} RI_FastPathEntry;
 
 /*
  * Local data
@@ -199,6 +216,8 @@ static HTAB *ri_query_cache = NULL;
 static HTAB *ri_compare_cache = NULL;
 static dclist_head ri_constraint_cache_valid_list;
 
+static HTAB *ri_fastpath_cache = NULL;
+static bool ri_fastpath_callback_registered = false;
 
 /*
  * Local function prototypes
@@ -267,6 +286,8 @@ pg_noreturn static void ri_ReportViolation(const RI_ConstraintInfo *riinfo,
 										   Relation pk_rel, Relation fk_rel,
 										   TupleTableSlot *violatorslot, TupleDesc tupdesc,
 										   int queryno, bool is_restrict, bool partgone);
+static RI_FastPathEntry *ri_FastPathGetEntry(const RI_ConstraintInfo *riinfo);
+static void ri_FastPathCleanup(void *arg);
 
 
 /*
@@ -2696,6 +2717,19 @@ ri_FastPathCheck(const RI_ConstraintInfo *riinfo,
 	Oid			saved_userid;
 	int			saved_sec_context;
 	Snapshot	snapshot;
+	bool		use_cache;
+	RI_FastPathEntry *fpentry = NULL;
+
+	/*
+	 * Use the per-batch cache only if we're inside the after-trigger
+	 * framework, where our cleanup callback will fire.  During ALTER TABLE
+	 * ... ADD FOREIGN KEY validation, triggers are called directly and the
+	 * callback would never run, leaking resources.
+	 */
+	use_cache = AfterTriggerBatchIsActive();
+
+	if (use_cache)
+		fpentry = ri_FastPathGetEntry(riinfo);
 
 	/*
 	 * Advance the command counter so the snapshot sees the effects of prior
@@ -2703,15 +2737,40 @@ ri_FastPathCheck(const RI_ConstraintInfo *riinfo,
 	 * ri_PerformCheck().
 	 */
 	CommandCounterIncrement();
-	snapshot = RegisterSnapshot(GetLatestSnapshot());
-
-	pk_rel = table_open(riinfo->pk_relid, RowShareLock);
-	idx_rel = index_open(riinfo->conindid, AccessShareLock);
+	if (use_cache)
+	{
+		/*
+		 * The snapshot was registered once when the cache entry was created.
+		 * We just patch curcid to reflect the new command counter.
+		 * SnapshotSetCommandId() only patches process-global statics, not
+		 * registered copies, so we do it directly.
+		 *
+		 * The xmin/xmax/xip fields don't need refreshing: within a single
+		 * statement batch, only curcid changes between rows.
+		 */
+		Assert(fpentry && fpentry->snapshot != NULL);
+		snapshot = fpentry->snapshot;
+		snapshot->curcid = GetCurrentCommandId(false);
+	}
+	else
+		snapshot = RegisterSnapshot(GetLatestSnapshot());
 
-	slot = table_slot_create(pk_rel, NULL);
-	scandesc = index_beginscan(pk_rel, idx_rel,
-							   snapshot, NULL,
-							   riinfo->nkeys, 0);
+	if (use_cache)
+	{
+		pk_rel = fpentry->pk_rel;
+		idx_rel = fpentry->idx_rel;
+		scandesc = fpentry->scandesc;
+		slot = fpentry->slot;
+	}
+	else
+	{
+		pk_rel = table_open(riinfo->pk_relid, RowShareLock);
+		idx_rel = index_open(riinfo->conindid, AccessShareLock);
+		scandesc = index_beginscan(pk_rel, idx_rel,
+								   snapshot, NULL,
+								   riinfo->nkeys, 0);
+		slot = table_slot_create(pk_rel, NULL);
+	}
 
 	if (!riinfo->fpmeta_valid)
 		ri_populate_fastpath_metadata((RI_ConstraintInfo *) riinfo,
@@ -2782,12 +2841,15 @@ ri_FastPathCheck(const RI_ConstraintInfo *riinfo,
 		ExecDropSingleTupleTableSlot(xact_slot);
 	}
 
-	index_endscan(scandesc);
-	index_close(idx_rel, NoLock);
-	table_close(pk_rel, NoLock);
-	ExecDropSingleTupleTableSlot(slot);
-
-	UnregisterSnapshot(snapshot);
+	/* Non-cached path: clean up per-invocation resources */
+	if (!use_cache)
+	{
+		index_endscan(scandesc);
+		index_close(idx_rel, NoLock);
+		table_close(pk_rel, NoLock);
+		ExecDropSingleTupleTableSlot(slot);
+		UnregisterSnapshot(snapshot);
+	}
 
 	SetUserIdAndSecContext(saved_userid, saved_sec_context);
 
@@ -3673,3 +3735,166 @@ RI_FKey_trigger_type(Oid tgfoid)
 
 	return RI_TRIGGER_NONE;
 }
+
+/*
+ * ri_FastPathCleanup
+ *		Tear down all cached fast-path state.
+ *
+ * Called as an AfterTriggerBatchCallback at end of batch.
+ */
+static void
+ri_FastPathCleanup(void *arg)
+{
+	HASH_SEQ_STATUS status;
+	RI_FastPathEntry *entry;
+
+	if (ri_fastpath_cache == NULL)
+		return;
+
+	hash_seq_init(&status, ri_fastpath_cache);
+	while ((entry = hash_seq_search(&status)) != NULL)
+	{
+		if (entry->scandesc)
+			index_endscan(entry->scandesc);
+		if (entry->idx_rel)
+			index_close(entry->idx_rel, NoLock);
+		if (entry->pk_rel)
+			table_close(entry->pk_rel, NoLock);
+		if (entry->slot)
+			ExecDropSingleTupleTableSlot(entry->slot);
+		if (entry->snapshot)
+			UnregisterSnapshot(entry->snapshot);
+	}
+
+	hash_destroy(ri_fastpath_cache);
+	ri_fastpath_cache = NULL;
+	ri_fastpath_callback_registered = false;
+}
+
+static bool ri_fastpath_xact_callback_registered = false;
+
+static void
+ri_FastPathXactCallback(XactEvent event, void *arg)
+{
+	/*
+	 * TopTransactionContext is destroyed at end of transaction, taking the
+	 * hash table and all cached resources with it.  Just reset our static
+	 * pointers so we don't dereference freed memory.
+	 *
+	 * In the normal (non-error) path, ri_FastPathCleanup already ran via the
+	 * batch callback and did orderly teardown.  Here we're just handling the
+	 * abort path where that callback never fired.
+	 */
+	ri_fastpath_cache = NULL;
+	ri_fastpath_callback_registered = false;
+}
+
+static void
+ri_FastPathSubXactCallback(SubXactEvent event, SubTransactionId mySubid,
+						   SubTransactionId parentSubid, void *arg)
+{
+	if (event == SUBXACT_EVENT_ABORT_SUB)
+	{
+		/*
+		 * ResourceOwner already cleaned up relations, scans, and snapshots.
+		 * Just NULL our pointers so the still-registered batch callback
+		 * becomes a no-op.  The hash table memory in TopTransactionContext
+		 * will be freed at transaction end.
+		 */
+		ri_fastpath_cache = NULL;
+		ri_fastpath_callback_registered = false;
+	}
+}
+
+/*
+ * ri_FastPathGetEntry
+ *		Look up or create a per-batch cache entry for the given constraint.
+ *
+ * On first call for a constraint within a batch: opens pk_rel and the
+ * index, begins an index scan, allocates a result slot, and registers
+ * the cleanup callback.
+ *
+ * On subsequent calls: returns the existing entry.  Caller uses
+ * index_rescan() with new keys.
+ */
+static RI_FastPathEntry *
+ri_FastPathGetEntry(const RI_ConstraintInfo *riinfo)
+{
+	RI_FastPathEntry *entry;
+	bool		found;
+
+	/* Create hash table on first use in this batch */
+	if (ri_fastpath_cache == NULL)
+	{
+		HASHCTL		ctl;
+
+		if (!ri_fastpath_xact_callback_registered)
+		{
+			RegisterXactCallback(ri_FastPathXactCallback, NULL);
+			RegisterSubXactCallback(ri_FastPathSubXactCallback, NULL);
+			ri_fastpath_xact_callback_registered = true;
+		}
+
+		ctl.keysize = sizeof(Oid);
+		ctl.entrysize = sizeof(RI_FastPathEntry);
+		ctl.hcxt = TopTransactionContext;
+		ri_fastpath_cache = hash_create("RI fast-path cache",
+										16,
+										&ctl,
+										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	}
+
+	entry = hash_search(ri_fastpath_cache, &riinfo->constraint_id,
+						HASH_ENTER, &found);
+
+	if (!found)
+	{
+		MemoryContext oldcxt;
+
+		/*
+		 * Zero out non-key fields so ri_FastPathCleanup is safe if we error
+		 * out during partial initialization below.
+		 */
+		memset(((char *) entry) + sizeof(Oid), 0,
+			   sizeof(RI_FastPathEntry) - sizeof(Oid));
+
+		oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+
+		/*
+		 * Open PK table and its unique index.
+		 *
+		 * RowShareLock on pk_rel matches what the SPI path's SELECT ... FOR
+		 * KEY SHARE would acquire as a relation-level lock. AccessShareLock
+		 * on the index is standard for index scans.
+		 *
+		 * We don't release these locks until end of transaction, matching SPI
+		 * behavior.
+		 */
+		entry->pk_rel = table_open(riinfo->pk_relid, RowShareLock);
+		entry->idx_rel = index_open(riinfo->conindid, AccessShareLock);
+
+		/*
+		 * Register an initial snapshot.  Its curcid will be patched in place
+		 * on each subsequent row (see ri_FastPathCheck()), avoiding per-row
+		 * GetSnapshotData() overhead.
+		 */
+		entry->snapshot = RegisterSnapshot(GetLatestSnapshot());
+
+		entry->slot = table_slot_create(entry->pk_rel, NULL);
+
+		entry->scandesc = index_beginscan(entry->pk_rel, entry->idx_rel,
+										  entry->snapshot, NULL,
+										  riinfo->nkeys, 0);
+
+		MemoryContextSwitchTo(oldcxt);
+
+		/* Ensure cleanup at end of this trigger-firing batch */
+		if (!ri_fastpath_callback_registered)
+		{
+			RegisterAfterTriggerBatchCallback(ri_FastPathCleanup, NULL);
+			ri_fastpath_callback_registered = true;
+		}
+	}
+
+	return entry;
+}
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 556c86bf5e1..4304abffc8d 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -289,4 +289,22 @@ extern void RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel,
 
 extern int	RI_FKey_trigger_type(Oid tgfoid);
 
+/*
+ * Callback type for end-of-trigger-batch notifications.
+ *
+ * Registered via RegisterAfterTriggerBatchCallback().  Invoked when
+ * a batch of after-trigger processing completes:
+ *	- AfterTriggerEndQuery()      (immediate constraints)
+ *	- AfterTriggerFireDeferred()  (deferred constraints at COMMIT)
+ *	- AfterTriggerSetState()      (SET CONSTRAINTS IMMEDIATE)
+ *
+ * The callback list is cleared after each batch.  Callers must
+ * re-register if they need to be called again in a subsequent batch.
+ */
+typedef void (*AfterTriggerBatchCallback) (void *arg);
+
+extern void RegisterAfterTriggerBatchCallback(AfterTriggerBatchCallback callback,
+											  void *arg);
+extern bool AfterTriggerBatchIsActive(void);
+
 #endif							/* TRIGGER_H */
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 0826f518004..db831ddfc5c 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -3504,3 +3504,50 @@ DETAIL:  drop cascades to table fkpart13_t1
 drop cascades to table fkpart13_t2
 drop cascades to table fkpart13_t3
 RESET search_path;
+-- Tests foreign key check fast-path no-cache path.
+CREATE TABLE fp_pk_alter (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter SELECT generate_series(1, 100);
+CREATE TABLE fp_fk_alter (a int);
+INSERT INTO fp_fk_alter SELECT generate_series(1, 100);
+-- Validation path: should succeed
+ALTER TABLE fp_fk_alter ADD FOREIGN KEY (a) REFERENCES fp_pk_alter;
+INSERT INTO fp_fk_alter VALUES (101);  -- should fail (constraint active)
+ERROR:  insert or update on table "fp_fk_alter" violates foreign key constraint "fp_fk_alter_a_fkey"
+DETAIL:  Key (a)=(101) is not present in table "fp_pk_alter".
+DROP TABLE fp_fk_alter, fp_pk_alter;
+-- Separate test: validation catches existing violation
+CREATE TABLE fp_pk_alter2 (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter2 VALUES (1);
+CREATE TABLE fp_fk_alter2 (a int);
+INSERT INTO fp_fk_alter2 VALUES (1), (200);  -- 200 has no PK match
+ALTER TABLE fp_fk_alter2 ADD FOREIGN KEY (a) REFERENCES fp_pk_alter2;  -- should fail
+ERROR:  insert or update on table "fp_fk_alter2" violates foreign key constraint "fp_fk_alter2_a_fkey"
+DETAIL:  Key (a)=(200) is not present in table "fp_pk_alter2".
+DROP TABLE fp_fk_alter2, fp_pk_alter2;
+-- Tests that the fast-path handles caching for multiple constraints
+CREATE TABLE fp_pk1 (a int PRIMARY KEY);
+CREATE TABLE fp_pk2 (b int PRIMARY KEY);
+INSERT INTO fp_pk1 VALUES (1);
+INSERT INTO fp_pk2 VALUES (1);
+CREATE TABLE fp_multi_fk (
+    a int REFERENCES fp_pk1,
+    b int REFERENCES fp_pk2
+);
+INSERT INTO fp_multi_fk VALUES (1, 1);  -- two constraints, one batch
+INSERT INTO fp_multi_fk VALUES (1, 2);  -- second constraint fails
+ERROR:  insert or update on table "fp_multi_fk" violates foreign key constraint "fp_multi_fk_b_fkey"
+DETAIL:  Key (b)=(2) is not present in table "fp_pk2".
+DROP TABLE fp_multi_fk, fp_pk1, fp_pk2;
+-- Test that fast-path cache handles deferred constraints and SET CONSTRAINTS IMMEDIATE
+CREATE TABLE fp_pk_defer (a int PRIMARY KEY);
+CREATE TABLE fp_fk_defer (a int REFERENCES fp_pk_defer DEFERRABLE INITIALLY DEFERRED);
+INSERT INTO fp_pk_defer VALUES (1), (2);
+BEGIN;
+INSERT INTO fp_fk_defer VALUES (1);
+INSERT INTO fp_fk_defer VALUES (2);
+SET CONSTRAINTS ALL IMMEDIATE;  -- fires batch callback here
+INSERT INTO fp_fk_defer VALUES (3);  -- should fail, also tests that cache was cleaned up
+ERROR:  insert or update on table "fp_fk_defer" violates foreign key constraint "fp_fk_defer_a_fkey"
+DETAIL:  Key (a)=(3) is not present in table "fp_pk_defer".
+COMMIT;
+DROP TABLE fp_pk_defer, fp_fk_defer;
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index e9ee29331cb..0762caa3682 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -2498,3 +2498,47 @@ WITH cte AS (
 
 DROP SCHEMA fkpart13 CASCADE;
 RESET search_path;
+
+-- Tests foreign key check fast-path no-cache path.
+CREATE TABLE fp_pk_alter (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter SELECT generate_series(1, 100);
+CREATE TABLE fp_fk_alter (a int);
+INSERT INTO fp_fk_alter SELECT generate_series(1, 100);
+-- Validation path: should succeed
+ALTER TABLE fp_fk_alter ADD FOREIGN KEY (a) REFERENCES fp_pk_alter;
+INSERT INTO fp_fk_alter VALUES (101);  -- should fail (constraint active)
+DROP TABLE fp_fk_alter, fp_pk_alter;
+
+-- Separate test: validation catches existing violation
+CREATE TABLE fp_pk_alter2 (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter2 VALUES (1);
+CREATE TABLE fp_fk_alter2 (a int);
+INSERT INTO fp_fk_alter2 VALUES (1), (200);  -- 200 has no PK match
+ALTER TABLE fp_fk_alter2 ADD FOREIGN KEY (a) REFERENCES fp_pk_alter2;  -- should fail
+DROP TABLE fp_fk_alter2, fp_pk_alter2;
+
+-- Tests that the fast-path handles caching for multiple constraints
+CREATE TABLE fp_pk1 (a int PRIMARY KEY);
+CREATE TABLE fp_pk2 (b int PRIMARY KEY);
+INSERT INTO fp_pk1 VALUES (1);
+INSERT INTO fp_pk2 VALUES (1);
+CREATE TABLE fp_multi_fk (
+    a int REFERENCES fp_pk1,
+    b int REFERENCES fp_pk2
+);
+INSERT INTO fp_multi_fk VALUES (1, 1);  -- two constraints, one batch
+INSERT INTO fp_multi_fk VALUES (1, 2);  -- second constraint fails
+DROP TABLE fp_multi_fk, fp_pk1, fp_pk2;
+
+-- Test that fast-path cache handles deferred constraints and SET CONSTRAINTS IMMEDIATE
+CREATE TABLE fp_pk_defer (a int PRIMARY KEY);
+CREATE TABLE fp_fk_defer (a int REFERENCES fp_pk_defer DEFERRABLE INITIALLY DEFERRED);
+INSERT INTO fp_pk_defer VALUES (1), (2);
+
+BEGIN;
+INSERT INTO fp_fk_defer VALUES (1);
+INSERT INTO fp_fk_defer VALUES (2);
+SET CONSTRAINTS ALL IMMEDIATE;  -- fires batch callback here
+INSERT INTO fp_fk_defer VALUES (3);  -- should fail, also tests that cache was cleaned up
+COMMIT;
+DROP TABLE fp_pk_defer, fp_fk_defer;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 241945734ec..c8388ea33d3 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -30,6 +30,8 @@ AddForeignUpdateTargets_function
 AddrInfo
 AffixNode
 AffixNodeData
+AfterTriggerBatchCallback
+AfterTriggerCallbackItem
 AfterTriggerEvent
 AfterTriggerEventChunk
 AfterTriggerEventData
@@ -2446,6 +2448,7 @@ RIX
 RI_CompareHashEntry
 RI_CompareKey
 RI_ConstraintInfo
+RI_FastPathEntry
 RI_QueryHashEntry
 RI_QueryKey
 RTEKind
-- 
2.47.3

