From 50c3d20b8257994a4c9548267142d2ac794657df Mon Sep 17 00:00:00 2001
From: Hou Zhijie <houzj.fnst@cn.fujitsu.com>
Date: Tue, 25 Mar 2025 12:58:34 +0800
Subject: [PATCH v2] Fix crashes in logical replication during unique
 constraint violations on leaf partitions.

This commit addresses a logical replication crash introduced after commit
9ff6867, which occurs when incoming INSERT or UPDATE operations violate unique
constraints on leaf partitions.

The issue arises because unique key conflict detection relied on the last
ExecOpenIndices call, made with the speculative parameter set to true, to
construct the necessary index information. However, that index opening was
redundant -- the indexes had already been opened during target partition
identification via the parent table (e.g., through ExecFindPartition) -- and
hence the call was removed in 9ff6867. Unfortunately, ExecFindPartition opens
the indexes without constructing the index information required for conflict
detection, leading to the crash.

While one solution could be to adjust ExecFindPartition to build this necessary
index information by adding a function parameter, this commit opts for a more
efficient approach: delaying index information initialization until a potential
conflict is detected. This avoids unnecessary index information construction
when there is no conflict.
---
 src/backend/executor/execIndexing.c      |  5 ++--
 src/backend/executor/execReplication.c   | 31 ++++++++++++++++++++
 src/backend/replication/logical/worker.c |  4 +--
 src/test/subscription/t/035_conflicts.pl | 37 +++++++++++++++++++++++-
 4 files changed, 71 insertions(+), 6 deletions(-)

diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index e3fe9b78bb5..bdf862b2406 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -214,9 +214,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
 		ii = BuildIndexInfo(indexDesc);
 
 		/*
-		 * If the indexes are to be used for speculative insertion or conflict
-		 * detection in logical replication, add extra information required by
-		 * unique index entries.
+		 * If the indexes are to be used for speculative insertion, add extra
+		 * information required by unique index entries.
 		 */
 		if (speculative && ii->ii_Unique && !indexDesc->rd_index->indisexclusion)
 			BuildSpeculativeIndexInfo(indexDesc, ii);
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index ede89ea3cf9..c9b2aedbeef 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -431,6 +431,30 @@ retry:
 	return found;
 }
 
+/*
+ * Build additional index information necessary for conflict detection.
+ */
+static void
+BuildConflictIndexInfo(ResultRelInfo *resultRelInfo, Oid conflictindex)
+{
+	for (int i = 0; i < resultRelInfo->ri_NumIndices; i++)
+	{
+		Relation	indexRelation = resultRelInfo->ri_IndexRelationDescs[i];
+		IndexInfo  *indexRelationInfo = resultRelInfo->ri_IndexRelationInfo[i];
+
+		if (conflictindex != RelationGetRelid(indexRelation))
+			continue;
+
+		/*
+		 * This Assert will fail if BuildSpeculativeIndexInfo() is called
+		 * twice for the given index.
+		 */
+		Assert(indexRelationInfo->ii_UniqueOps == NULL);
+
+		BuildSpeculativeIndexInfo(indexRelation, indexRelationInfo);
+	}
+}
+
 /*
  * Find the tuple that violates the passed unique index (conflictindex).
  *
@@ -452,6 +476,13 @@ FindConflictTuple(ResultRelInfo *resultRelInfo, EState *estate,
 
 	*conflictslot = NULL;
 
+	/*
+	 * Build additional information required to detect constraint violations.
+	 * Refer to check_exclusion_or_unique_constraint() for details on how this
+	 * information is used.
+	 */
+	BuildConflictIndexInfo(resultRelInfo, conflictindex);
+
 retry:
 	if (ExecCheckIndexConstraints(resultRelInfo, slot, estate,
 								  &conflictTid, &slot->tts_tid,
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index e3b2b144942..5ce596f4576 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2457,7 +2457,7 @@ apply_handle_insert(StringInfo s)
 	{
 		ResultRelInfo *relinfo = edata->targetRelInfo;
 
-		ExecOpenIndices(relinfo, true);
+		ExecOpenIndices(relinfo, false);
 		apply_handle_insert_internal(edata, relinfo, remoteslot);
 		ExecCloseIndices(relinfo);
 	}
@@ -2680,7 +2680,7 @@ apply_handle_update_internal(ApplyExecutionData *edata,
 	MemoryContext oldctx;
 
 	EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
-	ExecOpenIndices(relinfo, true);
+	ExecOpenIndices(relinfo, false);
 
 	found = FindReplTupleInLocalRel(edata, localrel,
 									&relmapentry->remoterel,
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 3a4d44e1d0e..2a7a8239a29 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -25,14 +25,23 @@ $node_subscriber->start;
 $node_publisher->safe_psql('postgres',
 	"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
 
+$node_publisher->safe_psql('postgres',
+	"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+
 # Create same table on subscriber
 $node_subscriber->safe_psql('postgres',
 	"CREATE TABLE conf_tab (a int PRIMARY key, b int UNIQUE, c int UNIQUE);");
 
+$node_subscriber->safe_psql(
+	'postgres', qq[
+	 CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int, c int, unique(a,b)) PARTITION BY RANGE (a);
+	 CREATE TABLE conf_tab_2_p1 PARTITION OF conf_tab_2 FOR VALUES FROM (MINVALUE) TO (100);
+]);
+
 # Setup logical replication
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
 $node_publisher->safe_psql('postgres',
-	"CREATE PUBLICATION pub_tab FOR TABLE conf_tab");
+	"CREATE PUBLICATION pub_tab FOR TABLE conf_tab, conf_tab_2");
 
 # Create the subscription
 my $appname = 'sub_tab';
@@ -110,4 +119,30 @@ $node_subscriber->wait_for_log(
 
 pass('multiple_unique_conflicts detected during update');
 
+# Truncate the table to remove the conflicting data so that replication can proceed
+$node_subscriber->safe_psql('postgres', "TRUNCATE conf_tab;");
+
+
+##################################################
+# Test multiple_unique_conflicts due to INSERT on a leaf partition
+##################################################
+
+# Insert data in the subscriber table
+$node_subscriber->safe_psql('postgres',
+	"INSERT INTO conf_tab_2 VALUES (55,2,3);");
+
+# Insert data in the publisher table
+$node_publisher->safe_psql('postgres',
+	"INSERT INTO conf_tab_2 VALUES (55,2,3);");
+
+$node_subscriber->wait_for_log(
+	qr/conflict detected on relation \"public.conf_tab_2_p1\": conflict=multiple_unique_conflicts.*
+.*Key already exists in unique index \"conf_tab_2_p1_pkey\".*
+.*Key \(a\)=\(55\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\).*
+.*Key already exists in unique index \"conf_tab_2_p1_a_b_key\".*
+.*Key \(a, b\)=\(55, 2\); existing local tuple \(55, 2, 3\); remote tuple \(55, 2, 3\)./,
+	$log_offset);
+
+pass('multiple_unique_conflicts detected on a leaf partition during insert');
+
 done_testing();
-- 
2.31.1

