On Mon, Nov 13, 2023 at 5:28 PM Melanie Plageman
<melanieplage...@gmail.com> wrote:
> When there are no indexes on the relation, we can set would-be dead
> items LP_UNUSED and remove them during pruning. This saves us a vacuum
> WAL record, reducing WAL volume (and time spent writing and syncing
> WAL).
...
> Note that (in principle) this patch set is on top of the bug fix I
> proposed in [1].
>
> [1] https://www.postgresql.org/message-id/CAAKRu_YiL%3D44GvGnt1dpYouDSSoV7wzxVoXs8m3p311rp-TVQQ%40mail.gmail.com

Rebased on top of the fix in b2e237afddc56a and registered for the January
commitfest: https://commitfest.postgresql.org/46/4665/
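
To make the mechanism concrete, the heart of the change is the pruning
decision in heap_prune_chain() (a condensed sketch of patch 2/2, not a
drop-in excerpt; prstate, rootoffnum, and the heap_prune_record_*
helpers are the existing pruneheap.c ones):

    /*
     * With no indexes on the relation, no index vacuuming pass needs an
     * LP_DEAD placeholder, so the line pointer can be freed right away.
     */
    if (prstate->pronto_reap)
        heap_prune_record_unused(prstate, rootoffnum);
    else
        heap_prune_record_dead(prstate, rootoffnum);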

- Melanie
From ad07d577d7309bc0603357da3548c249c580bd1b Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Mon, 13 Nov 2023 14:10:05 -0500
Subject: [PATCH v2 2/2] Set would-be dead items LP_UNUSED while pruning

If there are no indexes on a relation, items can be marked LP_UNUSED
instead of LP_DEAD during lazy_scan_prune(). This avoids a separate
invocation of lazy_vacuum_heap_page() and saves a vacuum WAL record.

To accomplish this, add a new parameter, pronto_reap, to
heap_page_prune(), indicating that dead line pointers should be set to
LP_UNUSED during pruning, allowing earlier reaping of tuples.

Because the freespace map is not updated until after the lock on the
buffer has been dropped, and the lock is still needed to update the
visibility map, save our intent to update the freespace map in
LVPagePruneState.recordfreespace.
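
In outline, the caller-side changes look like this (a condensed sketch
of the lazy_scan_prune()/lazy_scan_heap() hunks below, with freezing and
error-context handling elided):

    /* lazy_scan_prune(): reap immediately iff the table has no indexes */
    bool		pronto_reap = (vacrel->nindexes == 0);

    heap_page_prune(rel, buf, vacrel->vistest, pronto_reap,
                    &presult, &vacrel->offnum);

    /* lazy_scan_heap(): FSM processing, deferred via the new flag */
    if (prunestate.recordfreespace)
    {
        Size		freespace = PageGetHeapFreeSpace(page);

        UnlockReleaseBuffer(buf);
        RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
    }
    else
        UnlockReleaseBuffer(buf);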

Discussion: https://postgr.es/m/CAAKRu_bgvb_k0gKOXWzNKWHt560R0smrGe3E8zewKPs8fiMKkw%40mail.gmail.com
---
 src/backend/access/heap/heapam.c     |   9 +-
 src/backend/access/heap/pruneheap.c  |  77 +++++++++++---
 src/backend/access/heap/vacuumlazy.c | 146 ++++++++++-----------------
 src/include/access/heapam.h          |   3 +-
 4 files changed, 125 insertions(+), 110 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 14de8158d49..b578c32eeb6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8803,8 +8803,13 @@ heap_xlog_prune(XLogReaderState *record)
 		nunused = (end - nowunused);
 		Assert(nunused >= 0);
 
-		/* Update all line pointers per the record, and repair fragmentation */
-		heap_page_prune_execute(buffer,
+		/*
+		 * Update all line pointers per the record, and repair fragmentation.
+		 * We always pass pronto_reap as true, because we don't know whether
+		 * or not this option was used when pruning. This reduces the
+		 * validation done on replay in an assert build.
+		 */
+		heap_page_prune_execute(buffer, true,
 								redirected, nredirected,
 								nowdead, ndead,
 								nowunused, nunused);
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index c5f1abd95a9..750a03c9259 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -35,6 +35,8 @@ typedef struct
 
 	/* tuple visibility test, initialized for the relation */
 	GlobalVisState *vistest;
+	/* whether or not dead items can be set LP_UNUSED during pruning */
+	bool		pronto_reap;
 
 	TransactionId new_prune_xid;	/* new prune hint value for page */
 	TransactionId snapshotConflictHorizon;	/* latest xid removed */
@@ -148,7 +150,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 		{
 			PruneResult presult;
 
-			heap_page_prune(relation, buffer, vistest, &presult, NULL);
+			heap_page_prune(relation, buffer, vistest, false,
+							&presult, NULL);
 
 			/*
 			 * Report the number of tuples reclaimed to pgstats.  This is
@@ -193,6 +196,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * (see heap_prune_satisfies_vacuum and
  * HeapTupleSatisfiesVacuum).
  *
+ * pronto_reap indicates whether or not dead items can be set LP_UNUSED during
+ * pruning.
+ *
  * off_loc is the offset location required by the caller to use in error
  * callback.
  *
@@ -203,6 +209,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 void
 heap_page_prune(Relation relation, Buffer buffer,
 				GlobalVisState *vistest,
+				bool pronto_reap,
 				PruneResult *presult,
 				OffsetNumber *off_loc)
 {
@@ -227,6 +234,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 	prstate.new_prune_xid = InvalidTransactionId;
 	prstate.rel = relation;
 	prstate.vistest = vistest;
+	prstate.pronto_reap = pronto_reap;
 	prstate.snapshotConflictHorizon = InvalidTransactionId;
 	prstate.nredirected = prstate.ndead = prstate.nunused = 0;
 	memset(prstate.marked, 0, sizeof(prstate.marked));
@@ -306,9 +314,9 @@ heap_page_prune(Relation relation, Buffer buffer,
 		if (off_loc)
 			*off_loc = offnum;
 
-		/* Nothing to do if slot is empty or already dead */
+		/* Nothing to do if slot is empty */
 		itemid = PageGetItemId(page, offnum);
-		if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
+		if (!ItemIdIsUsed(itemid))
 			continue;
 
 		/* Process this item or chain of items */
@@ -330,7 +338,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 		 * Apply the planned item changes, then repair page fragmentation, and
 		 * update the page's hint bit about whether it has free line pointers.
 		 */
-		heap_page_prune_execute(buffer,
+		heap_page_prune_execute(buffer, prstate.pronto_reap,
 								prstate.redirected, prstate.nredirected,
 								prstate.nowdead, prstate.ndead,
 								prstate.nowunused, prstate.nunused);
@@ -581,7 +589,17 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * function.)
 		 */
 		if (ItemIdIsDead(lp))
+		{
+			/*
+			 * If the relation has no indexes, we can set dead line pointers
+			 * LP_UNUSED now. We don't increment ndeleted here since the LP
+			 * was already marked dead.
+			 */
+			if (prstate->pronto_reap)
+				heap_prune_record_unused(prstate, offnum);
+
 			break;
+		}
 
 		Assert(ItemIdIsNormal(lp));
 		htup = (HeapTupleHeader) PageGetItem(dp, lp);
@@ -715,7 +733,17 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * redirect the root to the correct chain member.
 		 */
 		if (i >= nchain)
-			heap_prune_record_dead(prstate, rootoffnum);
+		{
+			/*
+			 * If the relation has no indexes, we can remove dead tuples
+			 * during pruning instead of marking their line pointers dead. Set
+			 * this tuple's line pointer LP_UNUSED.
+			 */
+			if (prstate->pronto_reap)
+				heap_prune_record_unused(prstate, rootoffnum);
+			else
+				heap_prune_record_dead(prstate, rootoffnum);
+		}
 		else
 			heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
 	}
@@ -726,9 +754,12 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * item.  This can happen if the loop in heap_page_prune caused us to
 		 * visit the dead successor of a redirect item before visiting the
 		 * redirect item.  We can clean up by setting the redirect item to
-		 * DEAD state.
+		 * DEAD state. If pronto_reap is true, we can set it LP_UNUSED now.
 		 */
-		heap_prune_record_dead(prstate, rootoffnum);
+		if (prstate->pronto_reap)
+			heap_prune_record_unused(prstate, rootoffnum);
+		else
+			heap_prune_record_dead(prstate, rootoffnum);
 	}
 
 	return ndeleted;
@@ -792,7 +823,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
  * buffer.
  */
 void
-heap_page_prune_execute(Buffer buffer,
+heap_page_prune_execute(Buffer buffer, bool pronto_reap,
 						OffsetNumber *redirected, int nredirected,
 						OffsetNumber *nowdead, int ndead,
 						OffsetNumber *nowunused, int nunused)
@@ -902,14 +933,28 @@ heap_page_prune_execute(Buffer buffer,
 
 #ifdef USE_ASSERT_CHECKING
 
-		/*
-		 * Only heap-only tuples can become LP_UNUSED during pruning.  They
-		 * don't need to be left in place as LP_DEAD items until VACUUM gets
-		 * around to doing index vacuuming.
-		 */
-		Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
-		htup = (HeapTupleHeader) PageGetItem(page, lp);
-		Assert(HeapTupleHeaderIsHeapOnly(htup));
+		if (pronto_reap)
+		{
+			/*
+			 * If the relation has no indexes, we may set any of LP_NORMAL,
+			 * LP_REDIRECT, or LP_DEAD items to LP_UNUSED during pruning. We
+			 * can't check much here except that, if the item is LP_NORMAL, it
+			 * should have storage before it is set LP_UNUSED.
+			 */
+			Assert(!ItemIdIsNormal(lp) || ItemIdHasStorage(lp));
+		}
+		else
+		{
+			/*
+			 * If the relation has indexes, only heap-only tuples can become
+			 * LP_UNUSED during pruning. They don't need to be left in place
+			 * as LP_DEAD items until VACUUM gets around to doing index
+			 * vacuuming.
+			 */
+			Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
+			htup = (HeapTupleHeader) PageGetItem(page, lp);
+			Assert(HeapTupleHeaderIsHeapOnly(htup));
+		}
 #endif
 
 		ItemIdSetUnused(lp);
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index d158cccd00d..26e1ea110f4 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -218,6 +218,8 @@ typedef struct LVRelState
 typedef struct LVPagePruneState
 {
 	bool		has_lpdead_items;	/* includes existing LP_DEAD items */
+	/* whether or not caller should do FSM processing for block */
+	bool		recordfreespace;
 
 	/*
 	 * State describes the proper VM bit states to set for the page following
@@ -829,6 +831,7 @@ lazy_scan_heap(LVRelState *vacrel)
 				next_fsm_block_to_vacuum = 0;
 	VacDeadItems *dead_items = vacrel->dead_items;
 	Buffer		vmbuffer = InvalidBuffer;
+	int			tuples_already_deleted;
 	bool		next_unskippable_allvis,
 				skipping_current_range;
 	const int	initprog_index[] = {
@@ -1009,6 +1012,8 @@ lazy_scan_heap(LVRelState *vacrel)
 			continue;
 		}
 
+		tuples_already_deleted = vacrel->tuples_deleted;
+
 		/*
 		 * Prune, freeze, and count tuples.
 		 *
@@ -1022,69 +1027,6 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
 
-		if (vacrel->nindexes == 0)
-		{
-			/*
-			 * Consider the need to do page-at-a-time heap vacuuming when
-			 * using the one-pass strategy now.
-			 *
-			 * The one-pass strategy will never call lazy_vacuum().  The steps
-			 * performed here can be thought of as the one-pass equivalent of
-			 * a call to lazy_vacuum().
-			 */
-			if (prunestate.has_lpdead_items)
-			{
-				Size		freespace;
-
-				lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer);
-
-				/* Forget the LP_DEAD items that we just vacuumed */
-				dead_items->num_items = 0;
-
-				/*
-				 * Now perform FSM processing for blkno, and move on to next
-				 * page.
-				 *
-				 * Our call to lazy_vacuum_heap_page() will have considered if
-				 * it's possible to set all_visible/all_frozen independently
-				 * of lazy_scan_prune().  Note that prunestate was invalidated
-				 * by lazy_vacuum_heap_page() call.
-				 */
-				freespace = PageGetHeapFreeSpace(page);
-
-				UnlockReleaseBuffer(buf);
-				RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-
-				/*
-				 * Periodically perform FSM vacuuming to make newly-freed
-				 * space visible on upper FSM pages. FreeSpaceMapVacuumRange()
-				 * vacuums the portion of the freespace map covering heap
-				 * pages from start to end - 1. Include the block we just
-				 * vacuumed by passing it blkno + 1. Overflow isn't an issue
-				 * because MaxBlockNumber + 1 is InvalidBlockNumber which
-				 * causes FreeSpaceMapVacuumRange() to vacuum freespace map
-				 * pages covering the remainder of the relation.
-				 */
-				if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
-				{
-					FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
-											blkno + 1);
-					next_fsm_block_to_vacuum = blkno + 1;
-				}
-
-				continue;
-			}
-
-			/*
-			 * There was no call to lazy_vacuum_heap_page() because pruning
-			 * didn't encounter/create any LP_DEAD items that needed to be
-			 * vacuumed.  Prune state has not been invalidated, so proceed
-			 * with prunestate-driven visibility map and FSM steps (just like
-			 * the two-pass strategy).
-			 */
-			Assert(dead_items->num_items == 0);
-		}
-
 		/*
 		 * Handle setting visibility map bit based on information from the VM
 		 * (as of last lazy_scan_skip() call), and from prunestate
@@ -1195,38 +1137,45 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
-		 * FSM
+		 * FSM.
+		 *
+		 * If we will likely do index vacuuming, wait until
+		 * lazy_vacuum_heap_rel() to save free space. This doesn't just save
+		 * us some cycles; it also allows us to record any additional free
+		 * space that lazy_vacuum_heap_page() will make available in cases
+		 * where it's possible to truncate the page's line pointer array.
+		 *
+		 * Note: It's not in fact 100% certain that we really will call
+		 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
+		 * vacuuming (and so must skip heap vacuuming).  This is deemed okay
+		 * because it only happens in emergencies, or when there is very
+		 * little free space anyway. (Besides, we start recording free space
+		 * in the FSM once index vacuuming has been abandoned.)
 		 */
-		if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)
-		{
-			/*
-			 * Wait until lazy_vacuum_heap_rel() to save free space.  This
-			 * doesn't just save us some cycles; it also allows us to record
-			 * any additional free space that lazy_vacuum_heap_page() will
-			 * make available in cases where it's possible to truncate the
-			 * page's line pointer array.
-			 *
-			 * Note: It's not in fact 100% certain that we really will call
-			 * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip
-			 * index vacuuming (and so must skip heap vacuuming).  This is
-			 * deemed okay because it only happens in emergencies, or when
-			 * there is very little free space anyway. (Besides, we start
-			 * recording free space in the FSM once index vacuuming has been
-			 * abandoned.)
-			 *
-			 * Note: The one-pass (no indexes) case is only supposed to make
-			 * it this far when there were no LP_DEAD items during pruning.
-			 */
-			Assert(vacrel->nindexes > 0);
-			UnlockReleaseBuffer(buf);
-		}
-		else
+		if (prunestate.recordfreespace)
 		{
 			Size		freespace = PageGetHeapFreeSpace(page);
 
 			UnlockReleaseBuffer(buf);
 			RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
 		}
+		else
+			UnlockReleaseBuffer(buf);
+
+		/*
+		 * Periodically perform FSM vacuuming to make newly-freed space
+		 * visible on upper FSM pages. For tables with indexes, this is
+		 * instead done after index vacuuming.
+		 */
+		if (vacrel->nindexes == 0 &&
+			vacrel->tuples_deleted > tuples_already_deleted &&
+			(blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES))
+		{
+			FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
+									blkno);
+			next_fsm_block_to_vacuum = blkno;
+		}
+
 	}
 
 	vacrel->blkno = InvalidBlockNumber;
@@ -1550,6 +1499,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				live_tuples,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
+	bool		pronto_reap;
 	bool		hastup = false;
 	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
@@ -1575,6 +1525,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	live_tuples = 0;
 	recently_dead_tuples = 0;
 
+	pronto_reap = vacrel->nindexes == 0;
+
 	/*
 	 * Prune all HOT-update chains in this page.
 	 *
@@ -1583,7 +1535,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * lpdead_items's final value can be thought of as the number of tuples
 	 * that were deleted from indexes.
 	 */
-	heap_page_prune(rel, buf, vacrel->vistest, &presult, &vacrel->offnum);
+	heap_page_prune(rel, buf, vacrel->vistest, pronto_reap,
+					&presult, &vacrel->offnum);
 
 	/*
 	 * Now scan the page to collect LP_DEAD items and check for tuples
@@ -1593,6 +1546,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	prunestate->all_visible = true;
 	prunestate->all_frozen = true;
 	prunestate->visibility_cutoff_xid = InvalidTransactionId;
+	prunestate->recordfreespace = false;
 
 	for (offnum = FirstOffsetNumber;
 		 offnum <= maxoff;
@@ -1914,6 +1868,15 @@ lazy_scan_prune(LVRelState *vacrel,
 	vacrel->live_tuples += live_tuples;
 	vacrel->recently_dead_tuples += recently_dead_tuples;
 
+	/*
+	 * If we will not do index vacuuming, either because we have no indexes,
+	 * because there is nothing to vacuum, or because do_index_vacuuming is
+	 * false, make sure we update the freespace map.
+	 */
+	if (vacrel->nindexes == 0 ||
+		!vacrel->do_index_vacuuming || lpdead_items == 0)
+		prunestate->recordfreespace = true;
+
 	/* Remember the location of the last page with nonremovable tuples */
 	if (hastup)
 		vacrel->nonempty_pages = blkno + 1;
@@ -1935,7 +1898,8 @@ lazy_scan_prune(LVRelState *vacrel,
  * callers.
  *
  * recordfreespace flag instructs caller on whether or not it should do
- * generic FSM processing for page.
+ * generic FSM processing for page. vacrel is updated with page-level counts
+ * and to indicate whether or not rel truncation is safe.
  */
 static bool
 lazy_scan_noprune(LVRelState *vacrel,
@@ -2518,7 +2482,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 	bool		all_frozen;
 	LVSavedErrInfo saved_err_info;
 
-	Assert(vacrel->nindexes == 0 || vacrel->do_index_vacuuming);
+	Assert(vacrel->do_index_vacuuming);
 
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index a2d7a0ea72f..1c731dded65 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -320,9 +320,10 @@ struct GlobalVisState;
 extern void heap_page_prune_opt(Relation relation, Buffer buffer);
 extern void heap_page_prune(Relation relation, Buffer buffer,
 							struct GlobalVisState *vistest,
+							bool pronto_reap,
 							PruneResult *presult,
 							OffsetNumber *off_loc);
-extern void heap_page_prune_execute(Buffer buffer,
+extern void heap_page_prune_execute(Buffer buffer, bool pronto_reap,
 									OffsetNumber *redirected, int nredirected,
 									OffsetNumber *nowdead, int ndead,
 									OffsetNumber *nowunused, int nunused);
-- 
2.37.2

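One replay detail worth calling out from the heap_xlog_prune() hunk
above: the WAL record does not carry whether pronto_reap was used, so
redo always passes true, and the assert-build validation in
heap_page_prune_execute() weakens accordingly (condensed from the
patch):

    #ifdef USE_ASSERT_CHECKING
    if (pronto_reap)
    {
        /*
         * LP_NORMAL, LP_REDIRECT, or LP_DEAD items may all become
         * LP_UNUSED; all we can insist on is that a normal item still
         * has storage.
         */
        Assert(!ItemIdIsNormal(lp) || ItemIdHasStorage(lp));
    }
    else
    {
        /* Only heap-only tuples may become LP_UNUSED during pruning */
        Assert(ItemIdIsNormal(lp) && ItemIdHasStorage(lp));
        htup = (HeapTupleHeader) PageGetItem(page, lp);
        Assert(HeapTupleHeaderIsHeapOnly(htup));
    }
    #endif
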
From 608658f2cbc0acde55aac815c0fdb523ec24c452 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Mon, 13 Nov 2023 16:47:08 -0500
Subject: [PATCH v2 1/2] Indicate rel truncation unsafe in lazy_scan[no]prune

Both lazy_scan_prune() and lazy_scan_noprune() must determine whether or
not the page contains tuples that make rel truncation unsafe, and
LVRelState->nonempty_pages is updated to reflect this. Previously, both
functions reported this through an output parameter (or output parameter
member), hastup, and the caller updated nonempty_pages after they
returned. There is no reason to wait until lazy_scan_[no]prune() returns:
plenty of other counters in the LVRelState are already updated there.
Updating nonempty_pages directly allows us to get rid of the hastup
output parameter.
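
The shape of the change in both functions, condensed (the per-item logic
is unchanged and elided here):

    bool		hastup = false;

    /* ... per-item loop: redirected items and tuples with storage
     * set hastup, exactly as before ... */

    /* Remember the location of the last page with nonremovable tuples */
    if (hastup)
        vacrel->nonempty_pages = blkno + 1;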
---
 src/backend/access/heap/vacuumlazy.c | 50 +++++++++++++++-------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 59f51f40e1b..d158cccd00d 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -217,7 +217,6 @@ typedef struct LVRelState
  */
 typedef struct LVPagePruneState
 {
-	bool		hastup;			/* Page prevents rel truncation? */
 	bool		has_lpdead_items;	/* includes existing LP_DEAD items */
 
 	/*
@@ -253,7 +252,7 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							LVPagePruneState *prunestate);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
-							  bool *hastup, bool *recordfreespace);
+							  bool *recordfreespace);
 static void lazy_vacuum(LVRelState *vacrel);
 static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
 static void lazy_vacuum_heap_rel(LVRelState *vacrel);
@@ -959,8 +958,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		page = BufferGetPage(buf);
 		if (!ConditionalLockBufferForCleanup(buf))
 		{
-			bool		hastup,
-						recordfreespace;
+			bool		recordfreespace;
 
 			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
@@ -972,20 +970,21 @@ lazy_scan_heap(LVRelState *vacrel)
 				continue;
 			}
 
-			/* Collect LP_DEAD items in dead_items array, count tuples */
-			if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup,
+			/*
+			 * Collect LP_DEAD items in dead_items array, count tuples,
+			 * determine if rel truncation is safe
+			 */
+			if (lazy_scan_noprune(vacrel, buf, blkno, page,
 								  &recordfreespace))
 			{
 				Size		freespace = 0;
 
 				/*
 				 * Processed page successfully (without cleanup lock) -- just
-				 * need to perform rel truncation and FSM steps, much like the
-				 * lazy_scan_prune case.  Don't bother trying to match its
-				 * visibility map setting steps, though.
+				 * need to update the FSM, much like the lazy_scan_prune case.
+				 * Don't bother trying to match its visibility map setting
+				 * steps, though.
 				 */
-				if (hastup)
-					vacrel->nonempty_pages = blkno + 1;
 				if (recordfreespace)
 					freespace = PageGetHeapFreeSpace(page);
 				UnlockReleaseBuffer(buf);
@@ -1023,10 +1022,6 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
 
-		/* Remember the location of the last page with nonremovable tuples */
-		if (prunestate.hastup)
-			vacrel->nonempty_pages = blkno + 1;
-
 		if (vacrel->nindexes == 0)
 		{
 			/*
@@ -1555,6 +1550,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				live_tuples,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
+	bool		hastup = false;
 	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
@@ -1593,7 +1589,6 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * Now scan the page to collect LP_DEAD items and check for tuples
 	 * requiring freezing among remaining tuples with storage
 	 */
-	prunestate->hastup = false;
 	prunestate->has_lpdead_items = false;
 	prunestate->all_visible = true;
 	prunestate->all_frozen = true;
@@ -1620,7 +1615,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		if (ItemIdIsRedirected(itemid))
 		{
 			/* page makes rel truncation unsafe */
-			prunestate->hastup = true;
+			hastup = true;
 			continue;
 		}
 
@@ -1750,7 +1745,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				break;
 		}
 
-		prunestate->hastup = true;	/* page makes rel truncation unsafe */
+		hastup = true;			/* page makes rel truncation unsafe */
 
 		/* Tuple with storage -- consider need to freeze */
 		if (heap_prepare_freeze_tuple(htup, &vacrel->cutoffs, &pagefrz,
@@ -1918,6 +1913,10 @@ lazy_scan_prune(LVRelState *vacrel,
 	vacrel->lpdead_items += lpdead_items;
 	vacrel->live_tuples += live_tuples;
 	vacrel->recently_dead_tuples += recently_dead_tuples;
+
+	/* Remember the location of the last page with nonremovable tuples */
+	if (hastup)
+		vacrel->nonempty_pages = blkno + 1;
 }
 
 /*
@@ -1935,7 +1934,6 @@ lazy_scan_prune(LVRelState *vacrel,
  * one or more tuples on the page.  We always return true for non-aggressive
  * callers.
  *
- * See lazy_scan_prune for an explanation of hastup return flag.
  * recordfreespace flag instructs caller on whether or not it should do
  * generic FSM processing for page.
  */
@@ -1944,7 +1942,6 @@ lazy_scan_noprune(LVRelState *vacrel,
 				  Buffer buf,
 				  BlockNumber blkno,
 				  Page page,
-				  bool *hastup,
 				  bool *recordfreespace)
 {
 	OffsetNumber offnum,
@@ -1953,6 +1950,7 @@ lazy_scan_noprune(LVRelState *vacrel,
 				live_tuples,
 				recently_dead_tuples,
 				missed_dead_tuples;
+	bool		hastup;
 	HeapTupleHeader tupleheader;
 	TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
 	MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
@@ -1960,7 +1958,7 @@ lazy_scan_noprune(LVRelState *vacrel,
 
 	Assert(BufferGetBlockNumber(buf) == blkno);
 
-	*hastup = false;			/* for now */
+	hastup = false;				/* for now */
 	*recordfreespace = false;	/* for now */
 
 	lpdead_items = 0;
@@ -1984,7 +1982,7 @@ lazy_scan_noprune(LVRelState *vacrel,
 
 		if (ItemIdIsRedirected(itemid))
 		{
-			*hastup = true;
+			hastup = true;
 			continue;
 		}
 
@@ -1998,7 +1996,7 @@ lazy_scan_noprune(LVRelState *vacrel,
 			continue;
 		}
 
-		*hastup = true;			/* page prevents rel truncation */
+		hastup = true;			/* page prevents rel truncation */
 		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
 		if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
 									 &NoFreezePageRelfrozenXid,
@@ -2100,7 +2098,7 @@ lazy_scan_noprune(LVRelState *vacrel,
 			 * but it beats having to maintain specialized heap vacuuming code
 			 * forever, for vanishingly little benefit.)
 			 */
-			*hastup = true;
+			hastup = true;
 			missed_dead_tuples += lpdead_items;
 		}
 
@@ -2156,6 +2154,10 @@ lazy_scan_noprune(LVRelState *vacrel,
 	if (missed_dead_tuples > 0)
 		vacrel->missed_dead_pages++;
 
+	/* Remember the location of the last page with nonremovable tuples */
+	if (hastup)
+		vacrel->nonempty_pages = blkno + 1;
+
 	/* Caller won't need to call lazy_scan_prune with same page */
 	return true;
 }
-- 
2.37.2
