On Wed, Jan 17, 2024 at 5:33 PM Melanie Plageman
<melanieplage...@gmail.com> wrote:
>
> This does mean that something is not quite right with 0001 as well as
> 0004. We'd end up checking if we are at 8GB much more often. I should
> probably find a way to replicate the cadence on master.

I believe I've done this in the attached v10.

- Melanie
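
For anyone skimming: the "cadence" above refers to how often
FreeSpaceMapVacuumRange() runs during the heap scan. A minimal
standalone sketch of the arithmetic (assuming the usual ~8GB threshold
at 8kB blocks; the constant name, loop, and values below are
illustrative, not the actual vacuumlazy.c code):

    #include <stdio.h>

    /* Assumed stand-in for VACUUM_FSM_EVERY_PAGES: ~8GB of 8kB heap blocks. */
    #define BLCKSZ          8192
    #define FSM_EVERY_PAGES ((unsigned) ((8ULL * 1024 * 1024 * 1024) / BLCKSZ))

    int
    main(void)
    {
        unsigned    next_fsm_block_to_vacuum = 0;

        /* Simulate scanning ~24GB of heap (3M blocks). */
        for (unsigned blkno = 0; blkno < 3 * 1024 * 1024; blkno++)
        {
            if (blkno - next_fsm_block_to_vacuum >= FSM_EVERY_PAGES)
            {
                printf("FSM vacuum of range [%u, %u)\n",
                       next_fsm_block_to_vacuum, blkno);
                next_fsm_block_to_vacuum = blkno;
            }
        }
        return 0;
    }

With the 8GB threshold this fires twice over 24GB of heap; the concern
with 0001/0004 was ending up evaluating this condition much more often
than master does.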
From 23fe74086fadf6ed4c7697e7b35b3ff9e0a5f87a Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Tue, 16 Jan 2024 17:28:07 -0500
Subject: [PATCH v10 4/4] Combine FSM updates for prune and no prune cases

The goal is to update the FSM exactly once per page vacuumed. The logic
for ensuring this is identical after lazy_scan_prune() and
lazy_scan_noprune(), so combine the two FSM updates. While doing this,
de-indent the logic for calling lazy_scan_noprune() and the first
invocation of lazy_scan_new_or_empty(). With these changes, it is
easier to see that lazy_scan_new_or_empty() is called exactly once,
before invoking either lazy_scan_noprune() or lazy_scan_prune().

Discussion: https://postgr.es/m/CA%2BTgmoaLTvipm%3Dxx4rJLr07m908PCu%3DQH3uCjD1UOn8YaEuO2g%40mail.gmail.com
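
The resulting per-block flow in lazy_scan_heap(), condensed into
pseudocode (distilled from the diff below; arguments and details
omitted):

    got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
    if (!got_cleanup_lock)
        LockBuffer(buf, BUFFER_LOCK_SHARE);

    if (lazy_scan_new_or_empty(...))
        continue;               /* lock and pin already released */

    if (!got_cleanup_lock &&
        !lazy_scan_noprune(...))
    {
        /* noprune couldn't do everything: upgrade to a cleanup lock */
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        LockBufferForCleanup(buf);
        got_cleanup_lock = true;
    }

    if (got_cleanup_lock)
        lazy_scan_prune(...);

    /* one shared FSM-update block covers both the prune and no-prune cases */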
---
 src/backend/access/heap/vacuumlazy.c | 89 +++++++++++-----------------
 1 file changed, 33 insertions(+), 56 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 32631d27c5..703136413f 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -838,6 +838,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		Buffer		buf;
 		Page		page;
 		bool		all_visible_according_to_vm;
+		bool		got_cleanup_lock = false;
 
 		if (blkno == next_unskippable_block)
 		{
@@ -940,54 +941,28 @@ lazy_scan_heap(LVRelState *vacrel)
 		buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
 								 vacrel->bstrategy);
 		page = BufferGetPage(buf);
-		if (!ConditionalLockBufferForCleanup(buf))
-		{
-			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
-			/* Check for new or empty pages before lazy_scan_noprune call */
-			if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
-									   vmbuffer))
-			{
-				/* Processed as new/empty page (lock and pin released) */
-				continue;
-			}
+		got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
 
-			/*
-			 * Collect LP_DEAD items in dead_items array, count tuples,
-			 * determine if rel truncation is safe
-			 */
-			if (lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
-			{
-				Size		freespace = 0;
-				bool		recordfreespace;
+		if (!got_cleanup_lock)
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
-				/*
-				 * We processed the page successfully (without a cleanup
-				 * lock).
-				 *
-				 * Update the FSM, just as we would in the case where
-				 * lazy_scan_prune() is called. Our goal is to update the
-				 * freespace map the last time we touch the page. If the
-				 * relation has no indexes, or if index vacuuming is disabled,
-				 * there will be no second heap pass; if this particular page
-				 * has no dead items, the second heap pass will not touch this
-				 * page. So, in those cases, update the FSM now.
-				 *
-				 * After a call to lazy_scan_prune(), we would also try to
-				 * adjust the page-level all-visible bit and the visibility
-				 * map, but we skip that step in this path.
-				 */
-				recordfreespace = vacrel->nindexes == 0
-					|| !vacrel->do_index_vacuuming
-					|| !has_lpdead_items;
-				if (recordfreespace)
-					freespace = PageGetHeapFreeSpace(page);
-				UnlockReleaseBuffer(buf);
-				if (recordfreespace)
-					RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-				continue;
-			}
+		/* Check for new or empty pages before lazy_scan_[no]prune call */
+		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
+								   vmbuffer))
+		{
+			/* Processed as new/empty page (lock and pin released) */
+			continue;
+		}
 
+		/*
+		 * Collect LP_DEAD items in dead_items array, count tuples, determine
+		 * if rel truncation is safe. If lazy_scan_noprune() returns false, we
+		 * must get a cleanup lock and call lazy_scan_prune().
+		 */
+		if (!got_cleanup_lock &&
+			!lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
+		{
 			/*
 			 * lazy_scan_noprune could not do all required processing.  Wait
 			 * for a cleanup lock, and call lazy_scan_prune in the usual way.
@@ -995,13 +970,7 @@ lazy_scan_heap(LVRelState *vacrel)
 			Assert(vacrel->aggressive);
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			LockBufferForCleanup(buf);
-		}
-
-		/* Check for new or empty pages before lazy_scan_prune call */
-		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
-		{
-			/* Processed as new/empty page (lock and pin released) */
-			continue;
+			got_cleanup_lock = true;
 		}
 
 		/*
@@ -1014,9 +983,10 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * tuple headers of remaining items with storage. It also determines
 		 * if truncating this block is safe.
 		 */
-		lazy_scan_prune(vacrel, buf, blkno, page,
-						vmbuffer, all_visible_according_to_vm,
-						&has_lpdead_items);
+		if (got_cleanup_lock)
+			lazy_scan_prune(vacrel, buf, blkno, page,
+							vmbuffer, all_visible_according_to_vm,
+							&has_lpdead_items);
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
@@ -1028,6 +998,12 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * space that lazy_vacuum_heap_page() will make available in cases
 		 * where it's possible to truncate the page's line pointer array.
 		 *
+		 * Our goal is to update the freespace map the last time we touch the
+		 * page. If the relation has no indexes, or if index vacuuming is
+		 * disabled, there will be no second heap pass; if this particular
+		 * page has no dead items, the second heap pass will not touch this
+		 * page. So, in those cases, update the FSM now.
+		 *
 		 * Note: It's not in fact 100% certain that we really will call
 		 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
 		 * vacuuming (and so must skip heap vacuuming).  This is deemed okay
@@ -1047,9 +1023,10 @@ lazy_scan_heap(LVRelState *vacrel)
 			/*
 			 * Periodically perform FSM vacuuming to make newly-freed space
 			 * visible on upper FSM pages. This is done after vacuuming if the
-			 * table has indexes.
+			 * table has indexes. Space won't have been freed up if only
+			 * lazy_scan_noprune() was called, so check got_cleanup_lock.
 			 */
-			if (vacrel->nindexes == 0 && has_lpdead_items &&
+			if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
 				blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
 			{
 				FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
-- 
2.37.2

From 04bc4a900f8fc6dc78bfbbbe5454c9ee1f4bc703 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Tue, 9 Jan 2024 17:17:01 -0500
Subject: [PATCH v10 3/4] Inline LVPagePruneState members in lazy_scan_prune()

Now that the code to update the visibility map has moved from
lazy_scan_heap() into lazy_scan_prune(), LVPagePruneState is mostly
obsolete. Replace all uses of its members in lazy_scan_prune() with
local variables and remove the struct.

Only has_lpdead_items is still needed by the caller, so return it
through an output parameter instead.
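
Condensed caller-side view after this change (distilled from the diff
below; not the exact code):

    bool        has_lpdead_items;

    lazy_scan_prune(vacrel, buf, blkno, page,
                    vmbuffer, all_visible_according_to_vm,
                    &has_lpdead_items);

    if (vacrel->nindexes == 0 ||
        !vacrel->do_index_vacuuming ||
        !has_lpdead_items)
    {
        /* no later heap pass will touch this page, so record free space now */
    }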
---
 src/backend/access/heap/vacuumlazy.c | 133 ++++++++++++++-------------
 src/tools/pgindent/typedefs.list     |   1 -
 2 files changed, 69 insertions(+), 65 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 34bc6b6890..32631d27c5 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -212,23 +212,6 @@ typedef struct LVRelState
 	int64		missed_dead_tuples; /* # removable, but not removed */
 } LVRelState;
 
-/*
- * State returned by lazy_scan_prune()
- */
-typedef struct LVPagePruneState
-{
-	bool		has_lpdead_items;	/* includes existing LP_DEAD items */
-
-	/*
-	 * State describes the proper VM bit states to set for the page following
-	 * pruning and freezing.  all_visible implies !has_lpdead_items, but don't
-	 * trust all_frozen result unless all_visible is also set to true.
-	 */
-	bool		all_visible;	/* Every item visible to all? */
-	bool		all_frozen;		/* provided all_visible is also true */
-	TransactionId visibility_cutoff_xid;	/* For recovery conflicts */
-} LVPagePruneState;
-
 /* Struct for saving and restoring vacuum error information. */
 typedef struct LVSavedErrInfo
 {
@@ -250,7 +233,7 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
 							Buffer vmbuffer, bool all_visible_according_to_vm,
-							LVPagePruneState *prunestate);
+							bool *has_lpdead_items);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
 							  bool *has_lpdead_items);
@@ -828,6 +811,7 @@ lazy_scan_heap(LVRelState *vacrel)
 				blkno,
 				next_unskippable_block,
 				next_fsm_block_to_vacuum = 0;
+	bool		has_lpdead_items;
 	VacDeadItems *dead_items = vacrel->dead_items;
 	Buffer		vmbuffer = InvalidBuffer;
 	bool		next_unskippable_allvis,
@@ -854,7 +838,6 @@ lazy_scan_heap(LVRelState *vacrel)
 		Buffer		buf;
 		Page		page;
 		bool		all_visible_according_to_vm;
-		LVPagePruneState prunestate;
 
 		if (blkno == next_unskippable_block)
 		{
@@ -959,8 +942,6 @@ lazy_scan_heap(LVRelState *vacrel)
 		page = BufferGetPage(buf);
 		if (!ConditionalLockBufferForCleanup(buf))
 		{
-			bool		has_lpdead_items;
-
 			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
 			/* Check for new or empty pages before lazy_scan_noprune call */
@@ -1035,7 +1016,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		lazy_scan_prune(vacrel, buf, blkno, page,
 						vmbuffer, all_visible_according_to_vm,
-						&prunestate);
+						&has_lpdead_items);
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
@@ -1056,7 +1037,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		if (vacrel->nindexes == 0
 			|| !vacrel->do_index_vacuuming
-			|| !prunestate.has_lpdead_items)
+			|| !has_lpdead_items)
 		{
 			Size		freespace = PageGetHeapFreeSpace(page);
 
@@ -1068,7 +1049,7 @@ lazy_scan_heap(LVRelState *vacrel)
 			 * visible on upper FSM pages. This is done after vacuuming if the
 			 * table has indexes.
 			 */
-			if (vacrel->nindexes == 0 && prunestate.has_lpdead_items &&
+			if (vacrel->nindexes == 0 && has_lpdead_items &&
 				blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
 			{
 				FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1383,6 +1364,14 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  * right after this operation completes instead of in the middle of it. Note that
  * any tuple that becomes dead after the call to heap_page_prune() can't need to
  * be frozen, because it was visible to another session when vacuum started.
+ *
+ * vmbuffer is the buffer containing the VM block with visibility information
+ * for the heap block, blkno. all_visible_according_to_vm is the saved
+ * visibility status of the heap block looked up earlier by the caller. We
+ * won't rely entirely on this status, as it may be out of date.
+ *
+ * *has_lpdead_items is set to true or false depending on whether, upon return
+ * from this function, any LP_DEAD items are still present on the page.
  */
 static void
 lazy_scan_prune(LVRelState *vacrel,
@@ -1391,7 +1380,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				Page page,
 				Buffer vmbuffer,
 				bool all_visible_according_to_vm,
-				LVPagePruneState *prunestate)
+				bool *has_lpdead_items)
 {
 	Relation	rel = vacrel->rel;
 	OffsetNumber offnum,
@@ -1404,6 +1393,9 @@ lazy_scan_prune(LVRelState *vacrel,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
 	bool		hastup = false;
+	bool		all_visible,
+				all_frozen;
+	TransactionId visibility_cutoff_xid;
 	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
@@ -1445,12 +1437,21 @@ lazy_scan_prune(LVRelState *vacrel,
 
 	/*
 	 * Now scan the page to collect LP_DEAD items and check for tuples
-	 * requiring freezing among remaining tuples with storage
+	 * requiring freezing among remaining tuples with storage. Keep track of
+	 * the visibility cutoff xid for recovery conflicts.
 	 */
-	prunestate->has_lpdead_items = false;
-	prunestate->all_visible = true;
-	prunestate->all_frozen = true;
-	prunestate->visibility_cutoff_xid = InvalidTransactionId;
+	*has_lpdead_items = false;
+	visibility_cutoff_xid = InvalidTransactionId;
+
+	/*
+	 * We will update the VM after collecting LP_DEAD items and freezing
+	 * tuples. Keep track of whether or not the page is all_visible and
+	 * all_frozen and use this information to update the VM. all_visible
+	 * implies 0 lpdead_items, but don't trust all_frozen result unless
+	 * all_visible is also set to true.
+	 */
+	all_visible = true;
+	all_frozen = true;
 
 	for (offnum = FirstOffsetNumber;
 		 offnum <= maxoff;
@@ -1538,13 +1539,13 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * asynchronously. See SetHintBits for more info. Check that
 				 * the tuple is hinted xmin-committed because of that.
 				 */
-				if (prunestate->all_visible)
+				if (all_visible)
 				{
 					TransactionId xmin;
 
 					if (!HeapTupleHeaderXminCommitted(htup))
 					{
-						prunestate->all_visible = false;
+						all_visible = false;
 						break;
 					}
 
@@ -1556,14 +1557,14 @@ lazy_scan_prune(LVRelState *vacrel,
 					if (!TransactionIdPrecedes(xmin,
 											   vacrel->cutoffs.OldestXmin))
 					{
-						prunestate->all_visible = false;
+						all_visible = false;
 						break;
 					}
 
 					/* Track newest xmin on page. */
-					if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid) &&
+					if (TransactionIdFollows(xmin, visibility_cutoff_xid) &&
 						TransactionIdIsNormal(xmin))
-						prunestate->visibility_cutoff_xid = xmin;
+						visibility_cutoff_xid = xmin;
 				}
 				break;
 			case HEAPTUPLE_RECENTLY_DEAD:
@@ -1574,7 +1575,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * pruning.)
 				 */
 				recently_dead_tuples++;
-				prunestate->all_visible = false;
+				all_visible = false;
 				break;
 			case HEAPTUPLE_INSERT_IN_PROGRESS:
 
@@ -1585,11 +1586,11 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * results.  This assumption is a bit shaky, but it is what
 				 * acquire_sample_rows() does, so be consistent.
 				 */
-				prunestate->all_visible = false;
+				all_visible = false;
 				break;
 			case HEAPTUPLE_DELETE_IN_PROGRESS:
 				/* This is an expected case during concurrent vacuum */
-				prunestate->all_visible = false;
+				all_visible = false;
 
 				/*
 				 * Count such rows as live.  As above, we assume the deleting
@@ -1619,7 +1620,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * definitely cannot be set all-frozen in the visibility map later on
 		 */
 		if (!totally_frozen)
-			prunestate->all_frozen = false;
+			all_frozen = false;
 	}
 
 	/*
@@ -1637,7 +1638,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * page all-frozen afterwards (might not happen until final heap pass).
 	 */
 	if (pagefrz.freeze_required || tuples_frozen == 0 ||
-		(prunestate->all_visible && prunestate->all_frozen &&
+		(all_visible && all_frozen &&
 		 fpi_before != pgWalUsage.wal_fpi))
 	{
 		/*
@@ -1675,11 +1676,11 @@ lazy_scan_prune(LVRelState *vacrel,
 			 * once we're done with it.  Otherwise we generate a conservative
 			 * cutoff by stepping back from OldestXmin.
 			 */
-			if (prunestate->all_visible && prunestate->all_frozen)
+			if (all_visible && all_frozen)
 			{
 				/* Using same cutoff when setting VM is now unnecessary */
-				snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
-				prunestate->visibility_cutoff_xid = InvalidTransactionId;
+				snapshotConflictHorizon = visibility_cutoff_xid;
+				visibility_cutoff_xid = InvalidTransactionId;
 			}
 			else
 			{
@@ -1702,7 +1703,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 */
 		vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
 		vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
-		prunestate->all_frozen = false;
+		all_frozen = false;
 		tuples_frozen = 0;		/* avoid miscounts in instrumentation */
 	}
 
@@ -1715,16 +1716,17 @@ lazy_scan_prune(LVRelState *vacrel,
 	 */
 #ifdef USE_ASSERT_CHECKING
 	/* Note that all_frozen value does not matter when !all_visible */
-	if (prunestate->all_visible && lpdead_items == 0)
+	if (all_visible && lpdead_items == 0)
 	{
-		TransactionId cutoff;
-		bool		all_frozen;
+		TransactionId debug_cutoff;
+		bool		debug_all_frozen;
 
-		if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
+		if (!heap_page_is_all_visible(vacrel, buf,
+									  &debug_cutoff, &debug_all_frozen))
 			Assert(false);
 
-		Assert(!TransactionIdIsValid(cutoff) ||
-			   cutoff == prunestate->visibility_cutoff_xid);
+		Assert(!TransactionIdIsValid(debug_cutoff) ||
+			   debug_cutoff == visibility_cutoff_xid);
 	}
 #endif
 
@@ -1737,7 +1739,6 @@ lazy_scan_prune(LVRelState *vacrel,
 		ItemPointerData tmp;
 
 		vacrel->lpdead_item_pages++;
-		prunestate->has_lpdead_items = true;
 
 		ItemPointerSetBlockNumber(&tmp, blkno);
 
@@ -1762,7 +1763,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * Now that freezing has been finalized, unset all_visible.  It needs
 		 * to reflect the present state of things, as expected by our caller.
 		 */
-		prunestate->all_visible = false;
+		all_visible = false;
 	}
 
 	/* Finally, add page-local counts to whole-VACUUM counts */
@@ -1776,19 +1777,23 @@ lazy_scan_prune(LVRelState *vacrel,
 	if (hastup)
 		vacrel->nonempty_pages = blkno + 1;
 
-	Assert(!prunestate->all_visible || !prunestate->has_lpdead_items);
+	/* Did we find LP_DEAD items? */
+	*has_lpdead_items = (lpdead_items > 0);
+
+	Assert(!all_visible || !(*has_lpdead_items));
 
 	/*
 	 * Handle setting visibility map bit based on information from the VM (as
-	 * of last lazy_scan_skip() call), and from prunestate
+	 * of last lazy_scan_skip() call), and from all_visible and all_frozen
+	 * variables
 	 */
-	if (!all_visible_according_to_vm && prunestate->all_visible)
+	if (!all_visible_according_to_vm && all_visible)
 	{
 		uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
 
-		if (prunestate->all_frozen)
+		if (all_frozen)
 		{
-			Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+			Assert(!TransactionIdIsValid(visibility_cutoff_xid));
 			flags |= VISIBILITYMAP_ALL_FROZEN;
 		}
 
@@ -1808,7 +1813,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		PageSetAllVisible(page);
 		MarkBufferDirty(buf);
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vmbuffer, prunestate->visibility_cutoff_xid,
+						  vmbuffer, visibility_cutoff_xid,
 						  flags);
 	}
 
@@ -1841,7 +1846,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
 	 * however.
 	 */
-	else if (prunestate->has_lpdead_items && PageIsAllVisible(page))
+	else if (lpdead_items > 0 && PageIsAllVisible(page))
 	{
 		elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
 			 vacrel->relname, blkno);
@@ -1854,16 +1859,16 @@ lazy_scan_prune(LVRelState *vacrel,
 	/*
 	 * If the all-visible page is all-frozen but not marked as such yet, mark
 	 * it as all-frozen.  Note that all_frozen is only valid if all_visible is
-	 * true, so we must check both prunestate fields.
+	 * true, so we must check both all_visible and all_frozen.
 	 */
-	else if (all_visible_according_to_vm && prunestate->all_visible &&
-			 prunestate->all_frozen &&
+	else if (all_visible_according_to_vm && all_visible &&
+			 all_frozen &&
 			 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
 	{
 		/*
 		 * Avoid relying on all_visible_according_to_vm as a proxy for the
 		 * page-level PD_ALL_VISIBLE bit being set, since it might have become
-		 * stale -- even when all_visible is set in prunestate
+		 * stale -- even when all_visible is set
 		 */
 		if (!PageIsAllVisible(page))
 		{
@@ -1878,7 +1883,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * since a snapshotConflictHorizon sufficient to make everything safe
 		 * for REDO was logged when the page's tuples were frozen.
 		 */
-		Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+		Assert(!TransactionIdIsValid(visibility_cutoff_xid));
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
 						  vmbuffer, InvalidTransactionId,
 						  VISIBILITYMAP_ALL_VISIBLE |
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 29fd1cae64..17c0104376 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1405,7 +1405,6 @@ LPVOID
 LPWSTR
 LSEG
 LUID
-LVPagePruneState
 LVRelState
 LVSavedErrInfo
 LWLock
-- 
2.37.2

From 6aa1cd8f04a5ca188100267bc23cfa74426e6af3 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Fri, 5 Jan 2024 11:12:58 -0500
Subject: [PATCH v10 2/4] Move VM update code to lazy_scan_prune()

The visibility map is currently updated in lazy_scan_heap() after
lazy_scan_prune() returns, and most of lazy_scan_prune()'s output
parameters exist only to drive that update. Moving the VM update code
into lazy_scan_prune() simplifies lazy_scan_heap() and requires less
communication between the two. This is a pure lift and shift: no VM
update logic is changed. All but one of the members of the prunestate
are now obsolete; the struct will be removed in a future commit.
---
 src/backend/access/heap/vacuumlazy.c | 226 ++++++++++++++-------------
 1 file changed, 115 insertions(+), 111 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 2f530ad62c..34bc6b6890 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -249,6 +249,7 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 								   bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
+							Buffer vmbuffer, bool all_visible_according_to_vm,
 							LVPagePruneState *prunestate);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
@@ -1032,117 +1033,9 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * tuple headers of remaining items with storage. It also determines
 		 * if truncating this block is safe.
 		 */
-		lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
-
-		Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
-
-		/*
-		 * Handle setting visibility map bit based on information from the VM
-		 * (as of last lazy_scan_skip() call), and from prunestate
-		 */
-		if (!all_visible_according_to_vm && prunestate.all_visible)
-		{
-			uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
-
-			if (prunestate.all_frozen)
-			{
-				Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
-				flags |= VISIBILITYMAP_ALL_FROZEN;
-			}
-
-			/*
-			 * It should never be the case that the visibility map page is set
-			 * while the page-level bit is clear, but the reverse is allowed
-			 * (if checksums are not enabled).  Regardless, set both bits so
-			 * that we get back in sync.
-			 *
-			 * NB: If the heap page is all-visible but the VM bit is not set,
-			 * we don't need to dirty the heap page.  However, if checksums
-			 * are enabled, we do need to make sure that the heap page is
-			 * dirtied before passing it to visibilitymap_set(), because it
-			 * may be logged.  Given that this situation should only happen in
-			 * rare cases after a crash, it is not worth optimizing.
-			 */
-			PageSetAllVisible(page);
-			MarkBufferDirty(buf);
-			visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-							  vmbuffer, prunestate.visibility_cutoff_xid,
-							  flags);
-		}
-
-		/*
-		 * As of PostgreSQL 9.2, the visibility map bit should never be set if
-		 * the page-level bit is clear.  However, it's possible that the bit
-		 * got cleared after lazy_scan_skip() was called, so we must recheck
-		 * with buffer lock before concluding that the VM is corrupt.
-		 */
-		else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
-				 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
-		{
-			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
-				 vacrel->relname, blkno);
-			visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
-								VISIBILITYMAP_VALID_BITS);
-		}
-
-		/*
-		 * It's possible for the value returned by
-		 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
-		 * wrong for us to see tuples that appear to not be visible to
-		 * everyone yet, while PD_ALL_VISIBLE is already set. The real safe
-		 * xmin value never moves backwards, but
-		 * GetOldestNonRemovableTransactionId() is conservative and sometimes
-		 * returns a value that's unnecessarily small, so if we see that
-		 * contradiction it just means that the tuples that we think are not
-		 * visible to everyone yet actually are, and the PD_ALL_VISIBLE flag
-		 * is correct.
-		 *
-		 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE
-		 * set, however.
-		 */
-		else if (prunestate.has_lpdead_items && PageIsAllVisible(page))
-		{
-			elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
-				 vacrel->relname, blkno);
-			PageClearAllVisible(page);
-			MarkBufferDirty(buf);
-			visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
-								VISIBILITYMAP_VALID_BITS);
-		}
-
-		/*
-		 * If the all-visible page is all-frozen but not marked as such yet,
-		 * mark it as all-frozen.  Note that all_frozen is only valid if
-		 * all_visible is true, so we must check both prunestate fields.
-		 */
-		else if (all_visible_according_to_vm && prunestate.all_visible &&
-				 prunestate.all_frozen &&
-				 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
-		{
-			/*
-			 * Avoid relying on all_visible_according_to_vm as a proxy for the
-			 * page-level PD_ALL_VISIBLE bit being set, since it might have
-			 * become stale -- even when all_visible is set in prunestate
-			 */
-			if (!PageIsAllVisible(page))
-			{
-				PageSetAllVisible(page);
-				MarkBufferDirty(buf);
-			}
-
-			/*
-			 * Set the page all-frozen (and all-visible) in the VM.
-			 *
-			 * We can pass InvalidTransactionId as our visibility_cutoff_xid,
-			 * since a snapshotConflictHorizon sufficient to make everything
-			 * safe for REDO was logged when the page's tuples were frozen.
-			 */
-			Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid));
-			visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-							  vmbuffer, InvalidTransactionId,
-							  VISIBILITYMAP_ALL_VISIBLE |
-							  VISIBILITYMAP_ALL_FROZEN);
-		}
+		lazy_scan_prune(vacrel, buf, blkno, page,
+						vmbuffer, all_visible_according_to_vm,
+						&prunestate);
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
@@ -1496,6 +1389,8 @@ lazy_scan_prune(LVRelState *vacrel,
 				Buffer buf,
 				BlockNumber blkno,
 				Page page,
+				Buffer vmbuffer,
+				bool all_visible_according_to_vm,
 				LVPagePruneState *prunestate)
 {
 	Relation	rel = vacrel->rel;
@@ -1880,6 +1775,115 @@ lazy_scan_prune(LVRelState *vacrel,
 	/* Can't truncate this page */
 	if (hastup)
 		vacrel->nonempty_pages = blkno + 1;
+
+	Assert(!prunestate->all_visible || !prunestate->has_lpdead_items);
+
+	/*
+	 * Handle setting visibility map bit based on information from the VM (as
+	 * of last lazy_scan_skip() call), and from prunestate
+	 */
+	if (!all_visible_according_to_vm && prunestate->all_visible)
+	{
+		uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
+
+		if (prunestate->all_frozen)
+		{
+			Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+			flags |= VISIBILITYMAP_ALL_FROZEN;
+		}
+
+		/*
+		 * It should never be the case that the visibility map page is set
+		 * while the page-level bit is clear, but the reverse is allowed (if
+		 * checksums are not enabled).  Regardless, set both bits so that we
+		 * get back in sync.
+		 *
+		 * NB: If the heap page is all-visible but the VM bit is not set, we
+		 * don't need to dirty the heap page.  However, if checksums are
+		 * enabled, we do need to make sure that the heap page is dirtied
+		 * before passing it to visibilitymap_set(), because it may be logged.
+		 * Given that this situation should only happen in rare cases after a
+		 * crash, it is not worth optimizing.
+		 */
+		PageSetAllVisible(page);
+		MarkBufferDirty(buf);
+		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+						  vmbuffer, prunestate->visibility_cutoff_xid,
+						  flags);
+	}
+
+	/*
+	 * As of PostgreSQL 9.2, the visibility map bit should never be set if the
+	 * page-level bit is clear.  However, it's possible that the bit got
+	 * cleared after lazy_scan_skip() was called, so we must recheck with
+	 * buffer lock before concluding that the VM is corrupt.
+	 */
+	else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
+			 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
+	{
+		elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
+			 vacrel->relname, blkno);
+		visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+							VISIBILITYMAP_VALID_BITS);
+	}
+
+	/*
+	 * It's possible for the value returned by
+	 * GetOldestNonRemovableTransactionId() to move backwards, so it's not
+	 * wrong for us to see tuples that appear to not be visible to everyone
+	 * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value
+	 * never moves backwards, but GetOldestNonRemovableTransactionId() is
+	 * conservative and sometimes returns a value that's unnecessarily small,
+	 * so if we see that contradiction it just means that the tuples that we
+	 * think are not visible to everyone yet actually are, and the
+	 * PD_ALL_VISIBLE flag is correct.
+	 *
+	 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
+	 * however.
+	 */
+	else if (prunestate->has_lpdead_items && PageIsAllVisible(page))
+	{
+		elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
+			 vacrel->relname, blkno);
+		PageClearAllVisible(page);
+		MarkBufferDirty(buf);
+		visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
+							VISIBILITYMAP_VALID_BITS);
+	}
+
+	/*
+	 * If the all-visible page is all-frozen but not marked as such yet, mark
+	 * it as all-frozen.  Note that all_frozen is only valid if all_visible is
+	 * true, so we must check both prunestate fields.
+	 */
+	else if (all_visible_according_to_vm && prunestate->all_visible &&
+			 prunestate->all_frozen &&
+			 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+	{
+		/*
+		 * Avoid relying on all_visible_according_to_vm as a proxy for the
+		 * page-level PD_ALL_VISIBLE bit being set, since it might have become
+		 * stale -- even when all_visible is set in prunestate
+		 */
+		if (!PageIsAllVisible(page))
+		{
+			PageSetAllVisible(page);
+			MarkBufferDirty(buf);
+		}
+
+		/*
+		 * Set the page all-frozen (and all-visible) in the VM.
+		 *
+		 * We can pass InvalidTransactionId as our visibility_cutoff_xid,
+		 * since a snapshotConflictHorizon sufficient to make everything safe
+		 * for REDO was logged when the page's tuples were frozen.
+		 */
+		Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+						  vmbuffer, InvalidTransactionId,
+						  VISIBILITYMAP_ALL_VISIBLE |
+						  VISIBILITYMAP_ALL_FROZEN);
+	}
 }
 
 /*
-- 
2.37.2

From e1568664171befdb1281d360f0e20a6545260f39 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Tue, 16 Jan 2024 17:41:55 -0500
Subject: [PATCH v10 1/4] Set would-be dead items LP_UNUSED while pruning

If a relation has no indexes, items can be marked LP_UNUSED instead of
LP_DEAD while pruning. This avoids a separate invocation of
lazy_vacuum_heap_page() and saves a vacuum WAL record.

To accomplish this, pass heap_page_prune() a new parameter,
mark_unused_now, which indicates that dead line pointers should be set
to LP_UNUSED during pruning, allowing earlier reaping of tuples.

For now this is only enabled for VACUUM's pruning; on-access pruning
always passes mark_unused_now == false.

Discussion: https://postgr.es/m/CAAKRu_bgvb_k0gKOXWzNKWHt560R0smrGe3E8zewKPs8fiMKkw%40mail.gmail.com
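
The two call sites end up looking roughly like this (condensed from the
diff below):

    /* on-access pruning: can't cheaply prove there are no indexes, so pass false */
    heap_page_prune(relation, buffer, vistest, false,
                    &presult, NULL);

    /* VACUUM's pruning: with no indexes, would-be LP_DEAD items can be reaped now */
    heap_page_prune(rel, buf, vacrel->vistest, vacrel->nindexes == 0,
                    &presult, &vacrel->offnum);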
---
 src/backend/access/heap/pruneheap.c  |  80 ++++++++++++++---
 src/backend/access/heap/vacuumlazy.c | 129 ++++++++-------------------
 src/include/access/heapam.h          |   1 +
 3 files changed, 108 insertions(+), 102 deletions(-)

diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 3e0a1a260e..5917633567 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -35,6 +35,8 @@ typedef struct
 
 	/* tuple visibility test, initialized for the relation */
 	GlobalVisState *vistest;
+	/* whether or not dead items can be set LP_UNUSED during pruning */
+	bool		mark_unused_now;
 
 	TransactionId new_prune_xid;	/* new prune hint value for page */
 	TransactionId snapshotConflictHorizon;	/* latest xid removed */
@@ -67,6 +69,7 @@ static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
 static void heap_prune_record_redirect(PruneState *prstate,
 									   OffsetNumber offnum, OffsetNumber rdoffnum);
 static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum);
+static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum);
 static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
 static void page_verify_redirects(Page page);
 
@@ -148,7 +151,13 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 		{
 			PruneResult presult;
 
-			heap_page_prune(relation, buffer, vistest, &presult, NULL);
+			/*
+			 * For now, pass mark_unused_now as false regardless of whether or
+			 * not the relation has indexes, since we cannot safely determine
+			 * that during on-access pruning with the current implementation.
+			 */
+			heap_page_prune(relation, buffer, vistest, false,
+							&presult, NULL);
 
 			/*
 			 * Report the number of tuples reclaimed to pgstats.  This is
@@ -193,6 +202,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * (see heap_prune_satisfies_vacuum and
  * HeapTupleSatisfiesVacuum).
  *
+ * mark_unused_now indicates whether or not dead items can be set LP_UNUSED during
+ * pruning.
+ *
  * off_loc is the offset location required by the caller to use in error
  * callback.
  *
@@ -203,6 +215,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 void
 heap_page_prune(Relation relation, Buffer buffer,
 				GlobalVisState *vistest,
+				bool mark_unused_now,
 				PruneResult *presult,
 				OffsetNumber *off_loc)
 {
@@ -227,6 +240,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 	prstate.new_prune_xid = InvalidTransactionId;
 	prstate.rel = relation;
 	prstate.vistest = vistest;
+	prstate.mark_unused_now = mark_unused_now;
 	prstate.snapshotConflictHorizon = InvalidTransactionId;
 	prstate.nredirected = prstate.ndead = prstate.nunused = 0;
 	memset(prstate.marked, 0, sizeof(prstate.marked));
@@ -306,9 +320,9 @@ heap_page_prune(Relation relation, Buffer buffer,
 		if (off_loc)
 			*off_loc = offnum;
 
-		/* Nothing to do if slot is empty or already dead */
+		/* Nothing to do if slot is empty */
 		itemid = PageGetItemId(page, offnum);
-		if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
+		if (!ItemIdIsUsed(itemid))
 			continue;
 
 		/* Process this item or chain of items */
@@ -581,7 +595,17 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * function.)
 		 */
 		if (ItemIdIsDead(lp))
+		{
+			/*
+			 * If the caller set mark_unused_now true, we can set dead line
+			 * pointers LP_UNUSED now. We don't increment ndeleted here since
+			 * the LP was already marked dead.
+			 */
+			if (unlikely(prstate->mark_unused_now))
+				heap_prune_record_unused(prstate, offnum);
+
 			break;
+		}
 
 		Assert(ItemIdIsNormal(lp));
 		htup = (HeapTupleHeader) PageGetItem(dp, lp);
@@ -715,7 +739,7 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * redirect the root to the correct chain member.
 		 */
 		if (i >= nchain)
-			heap_prune_record_dead(prstate, rootoffnum);
+			heap_prune_record_dead_or_unused(prstate, rootoffnum);
 		else
 			heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
 	}
@@ -726,9 +750,9 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * item.  This can happen if the loop in heap_page_prune caused us to
 		 * visit the dead successor of a redirect item before visiting the
 		 * redirect item.  We can clean up by setting the redirect item to
-		 * DEAD state.
+		 * DEAD state or LP_UNUSED if the caller indicated.
 		 */
-		heap_prune_record_dead(prstate, rootoffnum);
+		heap_prune_record_dead_or_unused(prstate, rootoffnum);
 	}
 
 	return ndeleted;
@@ -774,6 +798,27 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
 	prstate->marked[offnum] = true;
 }
 
+/*
+ * Depending on whether or not the caller set mark_unused_now to true, record that a
+ * line pointer should be marked LP_DEAD or LP_UNUSED. There are other cases in
+ * which we will mark line pointers LP_UNUSED, but we will not mark line
+ * pointers LP_DEAD if mark_unused_now is true.
+ */
+static void
+heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum)
+{
+	/*
+	 * If the caller set mark_unused_now to true, we can remove dead tuples
+	 * during pruning instead of marking their line pointers dead. Set this
+	 * tuple's line pointer LP_UNUSED. We hint that this option is less
+	 * likely.
+	 */
+	if (unlikely(prstate->mark_unused_now))
+		heap_prune_record_unused(prstate, offnum);
+	else
+		heap_prune_record_dead(prstate, offnum);
+}
+
 /* Record line pointer to be marked unused */
 static void
 heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
@@ -903,13 +948,24 @@ heap_page_prune_execute(Buffer buffer,
 #ifdef USE_ASSERT_CHECKING
 
 		/*
-		 * Only heap-only tuples can become LP_UNUSED during pruning.  They
-		 * don't need to be left in place as LP_DEAD items until VACUUM gets
-		 * around to doing index vacuuming.
+		 * When heap_page_prune() was called, mark_unused_now may have been
+		 * passed as true, which allows would-be LP_DEAD items to be made
+		 * LP_UNUSED instead. This is only possible if the relation has no
+		 * indexes. If there are any dead items, then mark_unused_now was not
+		 * true and every item being marked LP_UNUSED must refer to a
+		 * heap-only tuple.
 		 */
-		Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
-		htup = (HeapTupleHeader) PageGetItem(page, lp);
-		Assert(HeapTupleHeaderIsHeapOnly(htup));
+		if (ndead > 0)
+		{
+			Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
+			htup = (HeapTupleHeader) PageGetItem(page, lp);
+			Assert(HeapTupleHeaderIsHeapOnly(htup));
+		}
+		else
+		{
+			Assert(ItemIdIsUsed(lp));
+		}
+
 #endif
 
 		ItemIdSetUnused(lp);
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index e7a942e183..2f530ad62c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1036,69 +1036,6 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
 
-		if (vacrel->nindexes == 0)
-		{
-			/*
-			 * Consider the need to do page-at-a-time heap vacuuming when
-			 * using the one-pass strategy now.
-			 *
-			 * The one-pass strategy will never call lazy_vacuum().  The steps
-			 * performed here can be thought of as the one-pass equivalent of
-			 * a call to lazy_vacuum().
-			 */
-			if (prunestate.has_lpdead_items)
-			{
-				Size		freespace;
-
-				lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer);
-
-				/* Forget the LP_DEAD items that we just vacuumed */
-				dead_items->num_items = 0;
-
-				/*
-				 * Now perform FSM processing for blkno, and move on to next
-				 * page.
-				 *
-				 * Our call to lazy_vacuum_heap_page() will have considered if
-				 * it's possible to set all_visible/all_frozen independently
-				 * of lazy_scan_prune().  Note that prunestate was invalidated
-				 * by lazy_vacuum_heap_page() call.
-				 */
-				freespace = PageGetHeapFreeSpace(page);
-
-				UnlockReleaseBuffer(buf);
-				RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-
-				/*
-				 * Periodically perform FSM vacuuming to make newly-freed
-				 * space visible on upper FSM pages. FreeSpaceMapVacuumRange()
-				 * vacuums the portion of the freespace map covering heap
-				 * pages from start to end - 1. Include the block we just
-				 * vacuumed by passing it blkno + 1. Overflow isn't an issue
-				 * because MaxBlockNumber + 1 is InvalidBlockNumber which
-				 * causes FreeSpaceMapVacuumRange() to vacuum freespace map
-				 * pages covering the remainder of the relation.
-				 */
-				if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
-				{
-					FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
-											blkno + 1);
-					next_fsm_block_to_vacuum = blkno + 1;
-				}
-
-				continue;
-			}
-
-			/*
-			 * There was no call to lazy_vacuum_heap_page() because pruning
-			 * didn't encounter/create any LP_DEAD items that needed to be
-			 * vacuumed.  Prune state has not been invalidated, so proceed
-			 * with prunestate-driven visibility map and FSM steps (just like
-			 * the two-pass strategy).
-			 */
-			Assert(dead_items->num_items == 0);
-		}
-
 		/*
 		 * Handle setting visibility map bit based on information from the VM
 		 * (as of last lazy_scan_skip() call), and from prunestate
@@ -1209,38 +1146,45 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
-		 * FSM
+		 * FSM.
+		 *
+		 * If we will likely do index vacuuming, wait until
+		 * lazy_vacuum_heap_rel() to save free space. This doesn't just save
+		 * us some cycles; it also allows us to record any additional free
+		 * space that lazy_vacuum_heap_page() will make available in cases
+		 * where it's possible to truncate the page's line pointer array.
+		 *
+		 * Note: It's not in fact 100% certain that we really will call
+		 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
+		 * vacuuming (and so must skip heap vacuuming).  This is deemed okay
+		 * because it only happens in emergencies, or when there is very
+		 * little free space anyway. (Besides, we start recording free space
+		 * in the FSM once index vacuuming has been abandoned.)
 		 */
-		if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
-		{
-			/*
-			 * Wait until lazy_vacuum_heap_rel() to save free space.  This
-			 * doesn't just save us some cycles; it also allows us to record
-			 * any additional free space that lazy_vacuum_heap_page() will
-			 * make available in cases where it's possible to truncate the
-			 * page's line pointer array.
-			 *
-			 * Note: It's not in fact 100% certain that we really will call
-			 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
-			 * index vacuuming (and so must skip heap vacuuming).  This is
-			 * deemed okay because it only happens in emergencies, or when
-			 * there is very little free space anyway. (Besides, we start
-			 * recording free space in the FSM once index vacuuming has been
-			 * abandoned.)
-			 *
-			 * Note: The one-pass (no indexes) case is only supposed to make
-			 * it this far when there were no LP_DEAD items during pruning.
-			 */
-			Assert(vacrel->nindexes > 0);
-			UnlockReleaseBuffer(buf);
-		}
-		else
+		if (vacrel->nindexes == 0
+			|| !vacrel->do_index_vacuuming
+			|| !prunestate.has_lpdead_items)
 		{
 			Size		freespace = PageGetHeapFreeSpace(page);
 
 			UnlockReleaseBuffer(buf);
 			RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+
+			/*
+			 * Periodically perform FSM vacuuming to make newly-freed space
+			 * visible on upper FSM pages. This is done after vacuuming if the
+			 * table has indexes.
+			 */
+			if (vacrel->nindexes == 0 && prunestate.has_lpdead_items &&
+				blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
+			{
+				FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
+										blkno);
+				next_fsm_block_to_vacuum = blkno;
+			}
 		}
+		else
+			UnlockReleaseBuffer(buf);
 	}
 
 	vacrel->blkno = InvalidBlockNumber;
@@ -1596,8 +1540,13 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * in presult.ndeleted. It should not be confused with lpdead_items;
 	 * lpdead_items's final value can be thought of as the number of tuples
 	 * that were deleted from indexes.
+	 *
+	 * If the relation has no indexes, we can immediately mark would-be dead
+	 * items LP_UNUSED, so mark_unused_now should be true if no indexes and
+	 * false otherwise.
 	 */
-	heap_page_prune(rel, buf, vacrel->vistest, &presult, &vacrel->offnum);
+	heap_page_prune(rel, buf, vacrel->vistest, vacrel->nindexes == 0,
+					&presult, &vacrel->offnum);
 
 	/*
 	 * Now scan the page to collect LP_DEAD items and check for tuples
@@ -2520,7 +2469,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 	bool		all_frozen;
 	LVSavedErrInfo saved_err_info;
 
-	Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
+	Assert(vacrel->do_index_vacuuming);
 
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 932ec0d6f2..4b133f6859 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -320,6 +320,7 @@ struct GlobalVisState;
 extern void heap_page_prune_opt(Relation relation, Buffer buffer);
 extern void heap_page_prune(Relation relation, Buffer buffer,
 							struct GlobalVisState *vistest,
+							bool mark_unused_now,
 							PruneResult *presult,
 							OffsetNumber *off_loc);
 extern void heap_page_prune_execute(Buffer buffer,
-- 
2.37.2
