From 4793759ffa5aa1a31bca8e19d8aab863adbbd9c4 Mon Sep 17 00:00:00 2001
From: Mikhail Nikalayeu <mihailnikalayeu@gmail.com>
Date: Mon, 16 Jun 2025 22:20:38 +0200
Subject: [PATCH v8 2/2] Fix btree index scan concurrency issues with dirty 
 snapshots

Non-MVCC index scans using SnapshotDirty or SnapshotSelf could miss
tuples because of concurrent modifications. Fix this by retaining the
read lock on a page for these snapshot types until the scan is
completely done with that page's tuples, preventing concurrent
modifications from causing inconsistent results.
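
An illustrative interleaving (sessions S1/S2 and page P are hypothetical,
for illustration only):

    S1: starts a SnapshotDirty scan, reads leaf page P, copies its
        matching heap TIDs into backend-local storage, and releases
        the read lock on P
    S2: deletes the matching tuple and inserts a replacement whose
        index entry lands on the same page P, then commits
    S1: processes the cached TIDs: the old tuple is gone, and the
        replacement's TID was never cached, so the scan misses it

With the fix, S1 keeps the read lock on P until it is done with P's
cached TIDs, so S2's index insertion onto P must wait and the scan
can no longer return a spurious empty result.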

Updated README to document this special case in the btree locking mechanism.
---
 src/backend/access/nbtree/README      | 13 ++++++++++++-
 src/backend/access/nbtree/nbtree.c    | 19 ++++++++++++++++++-
 src/backend/access/nbtree/nbtsearch.c | 16 ++++++++++++----
 src/backend/access/nbtree/nbtutils.c  |  4 +++-
 src/include/access/nbtree.h           |  1 +
 5 files changed, 46 insertions(+), 7 deletions(-)

diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README
index 53d4a61dc3f..a9280415633 100644
--- a/src/backend/access/nbtree/README
+++ b/src/backend/access/nbtree/README
@@ -85,7 +85,8 @@ move right until we find a page whose right-link matches the page we
 came from.  (Actually, it's even harder than that; see page deletion
 discussion below.)
 
-Page read locks are held only for as long as a scan is examining a page.
+Page read locks are held only for as long as a scan is examining a page
+(except for SnapshotDirty and SnapshotSelf scans; see below).
 To minimize lock/unlock traffic, an index scan always searches a leaf page
 to identify all the matching items at once, copying their heap tuple IDs
 into backend-local storage.  The heap tuple IDs are then processed while
@@ -103,6 +104,16 @@ We also remember the left-link, and follow it when the scan moves backwards
 (though this requires extra handling to account for concurrent splits of
 the left sibling; see detailed move-left algorithm below).
 
+Despite the mechanics described above, non-MVCC scans (SnapshotDirty and
+SnapshotSelf) can still return inconsistent results.  This can happen when a
+concurrent transaction deletes a tuple and inserts a new tuple with a new TID
+on the same page.  If the scan has already visited the page and cached its
+content in backend-local storage, it might skip the old tuple because it was
+deleted and miss the new tuple because the page is not re-read.  To address
+this, SnapshotDirty and SnapshotSelf scans retain the read lock on the page
+until they are completely done processing its tuples, preventing concurrent
+modifications that could lead to inconsistent results.
+
 In most cases we release our lock and pin on a page before attempting
 to acquire pin and lock on the page we are moving to.  In a few places
 it is necessary to lock the next page before releasing the current one.
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index fdff960c130..bda2b821a51 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -393,10 +393,22 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 		/* Before leaving current page, deal with any killed items */
 		if (so->numKilled > 0)
 			_bt_killitems(scan);
+		else if (!so->dropLock) /* _bt_killitems releases the lock itself */
+			_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 		BTScanPosUnpinIfPinned(so->currPos);
 		BTScanPosInvalidate(so->currPos);
 	}
 
+	/*
+	 * For SnapshotDirty and SnapshotSelf scans, we don't unlock the buffer,
+	 * keeping the lock until we're completely done with the current page.
+	 * This prevents concurrent modifications from causing inconsistent
+	 * results during non-MVCC scans.
+	 *
+	 * See nbtree/README for information about SnapshotDirty and SnapshotSelf.
+	 */
+	so->dropLock = (scan->xs_snapshot->snapshot_type != SNAPSHOT_DIRTY &&
+					scan->xs_snapshot->snapshot_type != SNAPSHOT_SELF);
 	/*
 	 * We prefer to eagerly drop leaf page pins before btgettuple returns.
 	 * This avoids making VACUUM wait to acquire a cleanup lock on the page.
@@ -420,7 +432,8 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 	 *
 	 * Note: so->dropPin should never change across rescans.
 	 */
-	so->dropPin = (!scan->xs_want_itup &&
+	so->dropPin = (so->dropLock &&
+				   !scan->xs_want_itup &&
 				   IsMVCCSnapshot(scan->xs_snapshot) &&
 				   RelationNeedsWAL(scan->indexRelation) &&
 				   scan->heapRelation != NULL);
@@ -477,6 +490,8 @@ btendscan(IndexScanDesc scan)
 		/* Before leaving current page, deal with any killed items */
 		if (so->numKilled > 0)
 			_bt_killitems(scan);
+		else if (!so->dropLock) /* _bt_killitems releases the lock itself */
+			_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 		BTScanPosUnpinIfPinned(so->currPos);
 	}
 
@@ -557,6 +572,8 @@ btrestrpos(IndexScanDesc scan)
 			/* Before leaving current page, deal with any killed items */
 			if (so->numKilled > 0)
 				_bt_killitems(scan);
+			else if (!so->dropLock) /* _bt_killitems releases the lock itself */
+				_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 			BTScanPosUnpinIfPinned(so->currPos);
 		}
 
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 36544ecfd58..04a7485c643 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -57,12 +57,14 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
 /*
  *	_bt_drop_lock_and_maybe_pin()
  *
- * Unlock so->currPos.buf.  If scan is so->dropPin, drop the pin, too.
+ * Unlock so->currPos.buf if so->dropLock is set.  If so->dropPin, drop the pin, too.
  * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
  */
 static inline void
 _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
 {
+	if (!so->dropLock)
+		return;					/* keep the lock for non-MVCC snapshots */
 	if (!so->dropPin)
 	{
 		/* Just drop the lock (not the pin) */
@@ -1532,7 +1534,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
  *	_bt_next() -- Get the next item in a scan.
  *
  *		On entry, so->currPos describes the current page, which may be pinned
- *		but is not locked, and so->currPos.itemIndex identifies which item was
+ *		but is not locked (except for SnapshotDirty and SnapshotSelf scans, where
+ *		the page remains locked), and so->currPos.itemIndex identifies which item was
  *		previously returned.
  *
  *		On success exit, so->currPos is updated as needed, and _bt_returnitem
@@ -2111,7 +2114,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
  * Wrapper on _bt_readnextpage that performs final steps for the current page.
  *
  * On entry, so->currPos must be valid.  Its buffer will be pinned, though
- * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * never locked, except for SnapshotDirty and SnapshotSelf scans where the buffer
+ * remains locked until we're done with all tuples from the page.
+ * (Actually, when so->dropPin there won't even be a pin held,
  * though so->currPos.currPage must still be set to a valid block number.)
  */
 static bool
@@ -2126,6 +2131,8 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
 	/* Before leaving current page, deal with any killed items */
 	if (so->numKilled > 0)
 		_bt_killitems(scan);
+	else if (!so->dropLock) /* _bt_killitems releases the lock itself */
+		_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 
 	/*
 	 * Before we modify currPos, make a copy of the page data if there was a
@@ -2265,7 +2272,8 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
 	}
 
 	/* There's no actually-matching data on the page in so->currPos.buf */
-	_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+	if (so->dropLock)
+		_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 
 	/* Call _bt_readnextpage using its _bt_steppage wrapper function */
 	if (!_bt_steppage(scan, dir))
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index c71d1b6f2e1..33215c89dde 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3379,8 +3379,10 @@ _bt_killitems(IndexScanDesc scan)
 		 * concurrent VACUUMs from recycling any of the TIDs on the page.
 		 */
 		Assert(BTScanPosIsPinned(so->currPos));
+		/* Re-acquire the read lock only if it was dropped earlier */
 		buf = so->currPos.buf;
-		_bt_lockbuf(rel, buf, BT_READ);
+		if (so->dropLock)
+			_bt_lockbuf(rel, buf, BT_READ);
 	}
 	else
 	{
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index e709d2e0afe..ca8ebd7a418 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -1070,6 +1070,7 @@ typedef struct BTScanOpaqueData
 	/* info about killed items if any (killedItems is NULL if never used) */
 	int		   *killedItems;	/* currPos.items indexes of killed items */
 	int			numKilled;		/* number of currently stored items */
+	bool		dropLock;		/* drop leaf lock before btgettuple returns? */
 	bool		dropPin;		/* drop leaf pin before btgettuple returns? */
 
 	/*
-- 
2.43.0

