From 6cd94d60d8982c60d230ccca9fc6ae073f25a8b9 Mon Sep 17 00:00:00 2001
From: alterego655 <824662526@qq.com>
Date: Mon, 13 Oct 2025 11:00:50 +0800
Subject: [PATCH v2] pgstattuple: Use streaming read API in pgstatindex
 functions

Replace the synchronous ReadBufferExtended() loops in pgstatindex_impl()
and pgstathashindex() with the streaming read API. The read stream can
combine neighboring blocks into larger reads and issue them ahead of
use, reducing system calls and allowing I/O to overlap with per-page
processing. Both functions scan blocks strictly in order starting after
the metapage, so block_range_read_stream_cb returns buffers in exactly
the order the existing per-block loops expect.
---
 contrib/pgstattuple/pgstatindex.c | 203 ++++++++++++++++++------------
 1 file changed, 120 insertions(+), 83 deletions(-)

diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 40823d54fca..4286706f029 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -37,6 +37,7 @@
 #include "funcapi.h"
 #include "miscadmin.h"
 #include "storage/bufmgr.h"
+#include "storage/read_stream.h"
 #include "utils/rel.h"
 #include "utils/varlena.h"
 
@@ -273,58 +274,75 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo)
 	indexStat.fragments = 0;
 
 	/*
-	 * Scan all blocks except the metapage
+	 * Scan all blocks except the metapage using streaming reads
 	 */
 	nblocks = RelationGetNumberOfBlocks(rel);
 
-	for (blkno = 1; blkno < nblocks; blkno++)
 	{
-		Buffer		buffer;
-		Page		page;
-		BTPageOpaque opaque;
-
-		CHECK_FOR_INTERRUPTS();
-
-		/* Read and lock buffer */
-		buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
-		LockBuffer(buffer, BUFFER_LOCK_SHARE);
-
-		page = BufferGetPage(buffer);
-		opaque = BTPageGetOpaque(page);
-
-		/*
-		 * Determine page type, and update totals.
-		 *
-		 * Note that we arbitrarily bucket deleted pages together without
-		 * considering if they're leaf pages or internal pages.
-		 */
-		if (P_ISDELETED(opaque))
-			indexStat.deleted_pages++;
-		else if (P_IGNORE(opaque))
-			indexStat.empty_pages++;	/* this is the "half dead" state */
-		else if (P_ISLEAF(opaque))
+		BlockRangeReadStreamPrivate p;
+		ReadStream *stream;
+
+		p.current_blocknum = 1;
+		p.last_exclusive = nblocks;
+
+		stream = read_stream_begin_relation(READ_STREAM_FULL |
+											READ_STREAM_USE_BATCHING,
+											bstrategy,
+											rel,
+											MAIN_FORKNUM,
+											block_range_read_stream_cb,
+											&p,
+											0);
+
+		for (blkno = 1; blkno < nblocks; blkno++)
 		{
-			int			max_avail;
+			Buffer		buffer;
+			Page		page;
+			BTPageOpaque opaque;
 
-			max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
-			indexStat.max_avail += max_avail;
-			indexStat.free_space += PageGetExactFreeSpace(page);
+			CHECK_FOR_INTERRUPTS();
 
-			indexStat.leaf_pages++;
+			buffer = read_stream_next_buffer(stream, NULL);
+			LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
+			page = BufferGetPage(buffer);
+			opaque = BTPageGetOpaque(page);
 
 			/*
-			 * If the next leaf is on an earlier block, it means a
-			 * fragmentation.
+			 * Determine page type, and update totals.
+			 *
+			 * Note that we arbitrarily bucket deleted pages together without
+			 * considering if they're leaf pages or internal pages.
 			 */
-			if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
-				indexStat.fragments++;
+			if (P_ISDELETED(opaque))
+				indexStat.deleted_pages++;
+			else if (P_IGNORE(opaque))
+				indexStat.empty_pages++;	/* this is the "half dead" state */
+			else if (P_ISLEAF(opaque))
+			{
+				int			max_avail;
+
+				max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
+				indexStat.max_avail += max_avail;
+				indexStat.free_space += PageGetExactFreeSpace(page);
+
+				indexStat.leaf_pages++;
+
+				/*
+				 * If the next leaf is on an earlier block, it means a
+				 * fragmentation.
+				 */
+				if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
+					indexStat.fragments++;
+			}
+			else
+				indexStat.internal_pages++;
+
+			UnlockReleaseBuffer(buffer);
 		}
-		else
-			indexStat.internal_pages++;
 
-		/* Unlock and release buffer */
-		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		ReleaseBuffer(buffer);
+		Assert(read_stream_next_buffer(stream, NULL) == InvalidBuffer);
+		read_stream_end(stream);
 	}
 
 	relation_close(rel, AccessShareLock);
@@ -636,58 +654,77 @@ pgstathashindex(PG_FUNCTION_ARGS)
 	/* prepare access strategy for this index */
 	bstrategy = GetAccessStrategy(BAS_BULKREAD);
 
-	/* Start from blkno 1 as 0th block is metapage */
-	for (blkno = 1; blkno < nblocks; blkno++)
+	/* Scan all blocks except the metapage using streaming reads */
 	{
-		Buffer		buf;
-		Page		page;
-
-		CHECK_FOR_INTERRUPTS();
-
-		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
-								 bstrategy);
-		LockBuffer(buf, BUFFER_LOCK_SHARE);
-		page = BufferGetPage(buf);
-
-		if (PageIsNew(page))
-			stats.unused_pages++;
-		else if (PageGetSpecialSize(page) !=
-				 MAXALIGN(sizeof(HashPageOpaqueData)))
-			ereport(ERROR,
-					(errcode(ERRCODE_INDEX_CORRUPTED),
-					 errmsg("index \"%s\" contains corrupted page at block %u",
-							RelationGetRelationName(rel),
-							BufferGetBlockNumber(buf))));
-		else
+		BlockRangeReadStreamPrivate p;
+		ReadStream *stream;
+
+		p.current_blocknum = 1;
+		p.last_exclusive = nblocks;
+
+		stream = read_stream_begin_relation(READ_STREAM_FULL |
+											READ_STREAM_USE_BATCHING,
+											bstrategy,
+											rel,
+											MAIN_FORKNUM,
+											block_range_read_stream_cb,
+											&p,
+											0);
+
+		for (blkno = 1; blkno < nblocks; blkno++)
 		{
-			HashPageOpaque opaque;
-			int			pagetype;
+			Buffer		buf;
+			Page		page;
 
-			opaque = HashPageGetOpaque(page);
-			pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
+			CHECK_FOR_INTERRUPTS();
 
-			if (pagetype == LH_BUCKET_PAGE)
-			{
-				stats.bucket_pages++;
-				GetHashPageStats(page, &stats);
-			}
-			else if (pagetype == LH_OVERFLOW_PAGE)
-			{
-				stats.overflow_pages++;
-				GetHashPageStats(page, &stats);
-			}
-			else if (pagetype == LH_BITMAP_PAGE)
-				stats.bitmap_pages++;
-			else if (pagetype == LH_UNUSED_PAGE)
+			buf = read_stream_next_buffer(stream, NULL);
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
+			page = BufferGetPage(buf);
+
+			if (PageIsNew(page))
 				stats.unused_pages++;
-			else
+			else if (PageGetSpecialSize(page) !=
+					 MAXALIGN(sizeof(HashPageOpaqueData)))
 				ereport(ERROR,
 						(errcode(ERRCODE_INDEX_CORRUPTED),
-						 errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
-								opaque->hasho_flag, RelationGetRelationName(rel),
+						 errmsg("index \"%s\" contains corrupted page at block %u",
+								RelationGetRelationName(rel),
 								BufferGetBlockNumber(buf))));
+			else
+			{
+				HashPageOpaque opaque;
+				int			pagetype;
+
+				opaque = HashPageGetOpaque(page);
+				pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
+
+				if (pagetype == LH_BUCKET_PAGE)
+				{
+					stats.bucket_pages++;
+					GetHashPageStats(page, &stats);
+				}
+				else if (pagetype == LH_OVERFLOW_PAGE)
+				{
+					stats.overflow_pages++;
+					GetHashPageStats(page, &stats);
+				}
+				else if (pagetype == LH_BITMAP_PAGE)
+					stats.bitmap_pages++;
+				else if (pagetype == LH_UNUSED_PAGE)
+					stats.unused_pages++;
+				else
+					ereport(ERROR,
+							(errcode(ERRCODE_INDEX_CORRUPTED),
+							 errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
+									opaque->hasho_flag, RelationGetRelationName(rel),
+									BufferGetBlockNumber(buf))));
+			}
+			UnlockReleaseBuffer(buf);
 		}
-		UnlockReleaseBuffer(buf);
+
+		Assert(read_stream_next_buffer(stream, NULL) == InvalidBuffer);
+		read_stream_end(stream);
 	}
 
 	/* Done accessing the index */
-- 
2.51.0

