From 2cc761a55b6f727b44a32b03e8393ffd3f61fb2c Mon Sep 17 00:00:00 2001
From: Peter Geoghegan <pg@bowt.ie>
Date: Wed, 17 Nov 2021 21:27:06 -0800
Subject: [PATCH v2] Simplify lazy_scan_heap's handling of scanned pages.

Redefine a scanned page as any heap page that actually gets pinned by
VACUUM's first pass over the heap.  Pages counted by scanned_pages are
now the complement of the pages that are skipped over using the
visibility map.  This new definition significantly simplifies quite a
few things.

Now heap relation truncation, visibility map bit setting, tuple counting
(e.g., for pg_class.reltuples), and tuple freezing all share a common
definition of scanned_pages.  That makes it possible to remove certain
special cases that never made much sense.  We no longer need to track
tupcount_pages separately (see bugfix commit 1914c5ea for details),
since we now always count tuples from pages that are scanned_pages.  We
also no longer needlessly distinguish between aggressive and
non-aggressive VACUUM operations when we cannot immediately acquire a
cleanup lock.
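
As a concrete consequence, pg_class.reltuples is now estimated over
scanned_pages rather than the old tupcount_pages.  Condensed from the
vac_estimate_reltuples call site changed in the diff below:

    /* now we can compute the new value for pg_class.reltuples */
    vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
                                                     vacrel->scanned_pages,
                                                     vacrel->live_tuples);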

Since any VACUUM (not just an aggressive VACUUM) can sometimes advance
relfrozenxid, we now make non-aggressive VACUUMs work just a little
harder in order to make that desirable outcome more likely in practice.
Aggressive VACUUMs have long checked contended pages with only a shared
lock, to avoid needlessly waiting on a cleanup lock (in the common case
where the contended page has no tuples that need to be frozen anyway).
We still don't make non-aggressive VACUUMs wait for a cleanup lock, of
course -- if we did that they'd no longer be non-aggressive.  But we now
make the non-aggressive case notice that a failure to acquire a cleanup
lock on one particular heap page does not in itself make it unsafe to
advance relfrozenxid for the whole relation (which is what we usually
see in the aggressive case already).
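
To illustrate, here is a condensed sketch of the new logic at the end of
heap_vacuum_rel (simplified from the code added by the diff below;
unrelated details omitted):

    /*
     * relfrozenxid/relminmxid can only be advanced when every page that
     * wasn't skipped as all-frozen in the visibility map was scanned,
     * and no cleanup lock failure invalidated the freeze cutoffs.
     */
    if (vacrel->scanned_pages + vacrel->frozenskipped_pages < orig_rel_pages ||
        !vacrel->freeze_cutoffs_valid)
    {
        /* Cannot advance relfrozenxid/relminmxid -- just update pg_class */
        Assert(!aggressive);
        vac_update_relstats(rel, new_rel_pages, new_live_tuples,
                            new_rel_allvisible, vacrel->nindexes > 0,
                            InvalidTransactionId, InvalidMultiXactId, false);
    }
    else
        vac_update_relstats(rel, new_rel_pages, new_live_tuples,
                            new_rel_allvisible, vacrel->nindexes > 0,
                            FreezeLimit, MultiXactCutoff, false);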

This new relfrozenxid optimization might not be all that valuable on its
own, but it may still facilitate future work that makes non-aggressive
VACUUMs more conscious of the benefit of advancing relfrozenxid sooner
rather than later.  In general it would be useful for non-aggressive
VACUUMs to be "more aggressive" opportunistically (e.g., by waiting for
a cleanup lock once or twice if needed).  It would also be generally
useful if aggressive VACUUMs were "less aggressive" opportunistically
(e.g. by being responsive to query cancellations when the risk of
wraparound failure is still very low).

We now also collect LP_DEAD items in the dead_tuples array in the case
where we cannot immediately get a cleanup lock on the buffer.  We cannot
prune without a cleanup lock, but opportunistic pruning may well have
left some LP_DEAD items behind in the past -- no reason to miss those.
Only VACUUM can mark these LP_DEAD items LP_UNUSED (no opportunistic
technique is independently capable of cleaning up line pointer bloat),
so we should not squander any opportunity to do that.  Commit 8523492d4e
taught VACUUM to set LP_DEAD line pointers to LP_UNUSED while only
holding an exclusive lock (not a cleanup lock), so we can expect to set
existing LP_DEAD items to LP_UNUSED reliably, even when we cannot
acquire our own cleanup lock at either pass over the heap (unless we opt
to skip index vacuuming, which implies that there is no second pass over
the heap).
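
For reference, the cleanup lock contention path in lazy_scan_heap now
boils down to the following (condensed from the diff below; the
lazy_scan_new_or_empty handling is omitted for brevity):

    if (!ConditionalLockBufferForCleanup(buf))
    {
        bool        hastup;

        LockBuffer(buf, BUFFER_LOCK_SHARE);

        if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup))
        {
            /* Collected any preexisting LP_DEAD items; cleanup lock not needed */
            UnlockReleaseBuffer(buf);
            if (hastup)
                vacrel->nonempty_pages = blkno + 1;
            continue;
        }

        /* Aggressive VACUUM only: wait for cleanup lock, then prune and freeze */
        Assert(vacrel->aggressive);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        LockBufferForCleanup(buf);
    }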

Note that we no longer report on "pin skipped pages" in VACUUM VERBOSE,
since there is barely any real practical sense in which we actually
miss doing useful work for these pages.  Besides, this information
always seemed to have little practical value, even to Postgres hackers.
---
 src/backend/access/heap/vacuumlazy.c          | 792 +++++++++++-------
 .../isolation/expected/vacuum-reltuples.out   |   2 +-
 .../isolation/specs/vacuum-reltuples.spec     |   7 +-
 3 files changed, 500 insertions(+), 301 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 282b44f87..39a7fb39e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -284,6 +284,8 @@ typedef struct LVRelState
 	Relation   *indrels;
 	int			nindexes;
 
+	/* Aggressive VACUUM (scan all unfrozen pages)? */
+	bool		aggressive;
 	/* Wraparound failsafe has been triggered? */
 	bool		failsafe_active;
 	/* Consider index vacuuming bypass optimization? */
@@ -308,6 +310,8 @@ typedef struct LVRelState
 	/* VACUUM operation's cutoff for freezing XIDs and MultiXactIds */
 	TransactionId FreezeLimit;
 	MultiXactId MultiXactCutoff;
+	/* Are FreezeLimit/MultiXactCutoff still valid? */
+	bool		freeze_cutoffs_valid;
 
 	/* Error reporting state */
 	char	   *relnamespace;
@@ -322,10 +326,8 @@ typedef struct LVRelState
 	 */
 	LVDeadItems *dead_items;	/* TIDs whose index tuples we'll delete */
 	BlockNumber rel_pages;		/* total number of pages */
-	BlockNumber scanned_pages;	/* number of pages we examined */
-	BlockNumber pinskipped_pages;	/* # of pages skipped due to a pin */
-	BlockNumber frozenskipped_pages;	/* # of frozen pages we skipped */
-	BlockNumber tupcount_pages; /* pages whose tuples we counted */
+	BlockNumber scanned_pages;	/* # pages examined (not skipped via VM) */
+	BlockNumber frozenskipped_pages;	/* # frozen pages skipped via VM */
 	BlockNumber pages_removed;	/* pages remove by truncation */
 	BlockNumber lpdead_item_pages;	/* # pages with LP_DEAD items */
 	BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
@@ -338,6 +340,7 @@ typedef struct LVRelState
 
 	/* Instrumentation counters */
 	int			num_index_scans;
+	/* Counters that follow are only for scanned_pages */
 	int64		tuples_deleted; /* # deleted from table */
 	int64		lpdead_items;	/* # deleted from indexes */
 	int64		new_dead_tuples;	/* new estimated total # of dead items in
@@ -377,19 +380,22 @@ static int	elevel = -1;
 
 
 /* non-export function prototypes */
-static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params,
-						   bool aggressive);
+static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params);
+static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
+								   BlockNumber blkno, Page page,
+								   bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
 							GlobalVisState *vistest,
 							LVPagePruneState *prunestate);
+static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
+							  BlockNumber blkno, Page page,
+							  bool *hastup);
 static void lazy_vacuum(LVRelState *vacrel);
 static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
 static void lazy_vacuum_heap_rel(LVRelState *vacrel);
 static int	lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
 								  Buffer buffer, int index, Buffer *vmbuffer);
-static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
-									LVRelState *vacrel);
 static bool lazy_check_wraparound_failsafe(LVRelState *vacrel);
 static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel);
 static void do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel);
@@ -465,16 +471,14 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	int			usecs;
 	double		read_rate,
 				write_rate;
-	bool		aggressive;		/* should we scan all unfrozen pages? */
-	bool		scanned_all_unfrozen;	/* actually scanned all such pages? */
+	bool		aggressive;
+	BlockNumber orig_rel_pages;
 	char	  **indnames = NULL;
 	TransactionId xidFullScanLimit;
 	MultiXactId mxactFullScanLimit;
 	BlockNumber new_rel_pages;
 	BlockNumber new_rel_allvisible;
 	double		new_live_tuples;
-	TransactionId new_frozen_xid;
-	MultiXactId new_min_multi;
 	ErrorContextCallback errcallback;
 	PgStat_Counter startreadtime = 0;
 	PgStat_Counter startwritetime = 0;
@@ -529,6 +533,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	vacrel->rel = rel;
 	vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
 					 &vacrel->indrels);
+	vacrel->aggressive = aggressive;
 	vacrel->failsafe_active = false;
 	vacrel->consider_bypass_optimization = true;
 
@@ -573,6 +578,8 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	vacrel->OldestXmin = OldestXmin;
 	vacrel->FreezeLimit = FreezeLimit;
 	vacrel->MultiXactCutoff = MultiXactCutoff;
+	/* Track if cutoffs became invalid (possible in !aggressive case only) */
+	vacrel->freeze_cutoffs_valid = true;
 
 	vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
 	vacrel->relname = pstrdup(RelationGetRelationName(rel));
@@ -609,30 +616,16 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 * Call lazy_scan_heap to perform all required heap pruning, index
 	 * vacuuming, and heap vacuuming (plus related processing)
 	 */
-	lazy_scan_heap(vacrel, params, aggressive);
+	lazy_scan_heap(vacrel, params);
 
 	/* Done with indexes */
 	vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
 
 	/*
-	 * Compute whether we actually scanned the all unfrozen pages. If we did,
-	 * we can adjust relfrozenxid and relminmxid.
-	 *
-	 * NB: We need to check this before truncating the relation, because that
-	 * will change ->rel_pages.
-	 */
-	if ((vacrel->scanned_pages + vacrel->frozenskipped_pages)
-		< vacrel->rel_pages)
-	{
-		Assert(!aggressive);
-		scanned_all_unfrozen = false;
-	}
-	else
-		scanned_all_unfrozen = true;
-
-	/*
-	 * Optionally truncate the relation.
+	 * Optionally truncate the relation.  But first remember the relation
+	 * size used by lazy_scan_prune, since we'll need it again later.
 	 */
+	orig_rel_pages = vacrel->rel_pages;
 	if (should_attempt_truncation(vacrel))
 	{
 		/*
@@ -663,28 +656,43 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 *
 	 * For safety, clamp relallvisible to be not more than what we're setting
 	 * relpages to.
-	 *
-	 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
-	 * since then we don't know for certain that all tuples have a newer xmin.
 	 */
-	new_rel_pages = vacrel->rel_pages;
+	new_rel_pages = vacrel->rel_pages;	/* After possible rel truncation */
 	new_live_tuples = vacrel->new_live_tuples;
 
 	visibilitymap_count(rel, &new_rel_allvisible, NULL);
 	if (new_rel_allvisible > new_rel_pages)
 		new_rel_allvisible = new_rel_pages;
 
-	new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
-	new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
-
-	vac_update_relstats(rel,
-						new_rel_pages,
-						new_live_tuples,
-						new_rel_allvisible,
-						vacrel->nindexes > 0,
-						new_frozen_xid,
-						new_min_multi,
-						false);
+	/*
+	 * Aggressive VACUUM (which is the same thing as anti-wraparound
+	 * autovacuum for most practical purposes) exists so that we'll reliably
+	 * advance relfrozenxid and relminmxid sooner or later.  But we can often
+	 * opportunistically advance them even in a non-aggressive VACUUM.
+	 * Consider if that's possible now.
+	 *
+	 * NB: We must use orig_rel_pages, not vacrel->rel_pages, since we want
+	 * the rel_pages used by lazy_scan_prune, from before a possible relation
+	 * truncation took place. (vacrel->rel_pages is now new_rel_pages.)
+	 */
+	if (vacrel->scanned_pages + vacrel->frozenskipped_pages < orig_rel_pages ||
+		!vacrel->freeze_cutoffs_valid)
+	{
+		/* Cannot advance relfrozenxid/relminmxid -- just update pg_class */
+		Assert(!aggressive);
+		vac_update_relstats(rel, new_rel_pages, new_live_tuples,
+							new_rel_allvisible, vacrel->nindexes > 0,
+							InvalidTransactionId, InvalidMultiXactId, false);
+	}
+	else
+	{
+		/* Can safely advance relfrozenxid and relminmxid, too */
+		Assert(vacrel->scanned_pages + vacrel->frozenskipped_pages ==
+			   orig_rel_pages);
+		vac_update_relstats(rel, new_rel_pages, new_live_tuples,
+							new_rel_allvisible, vacrel->nindexes > 0,
+							FreezeLimit, MultiXactCutoff, false);
+	}
 
 	/*
 	 * Report results to the stats collector, too.
@@ -713,7 +721,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 		{
 			StringInfoData buf;
 			char	   *msgfmt;
-			BlockNumber orig_rel_pages;
 
 			TimestampDifference(starttime, endtime, &secs, &usecs);
 
@@ -760,10 +767,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 							 vacrel->relnamespace,
 							 vacrel->relname,
 							 vacrel->num_index_scans);
-			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
+			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped frozen\n"),
 							 vacrel->pages_removed,
 							 vacrel->rel_pages,
-							 vacrel->pinskipped_pages,
 							 vacrel->frozenskipped_pages);
 			appendStringInfo(&buf,
 							 _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable, oldest xmin: %u\n"),
@@ -771,7 +777,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 							 (long long) vacrel->new_rel_tuples,
 							 (long long) vacrel->new_dead_tuples,
 							 OldestXmin);
-			orig_rel_pages = vacrel->rel_pages + vacrel->pages_removed;
 			if (orig_rel_pages > 0)
 			{
 				if (vacrel->do_index_vacuuming)
@@ -888,9 +893,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
  *		supply.
  */
 static void
-lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
+lazy_scan_heap(LVRelState *vacrel, VacuumParams *params)
 {
 	LVDeadItems *dead_items;
+	bool		aggressive;
 	BlockNumber nblocks,
 				blkno,
 				next_unskippable_block,
@@ -910,26 +916,14 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 
 	pg_rusage_init(&ru0);
 
-	if (aggressive)
-		ereport(elevel,
-				(errmsg("aggressively vacuuming \"%s.%s\"",
-						vacrel->relnamespace,
-						vacrel->relname)));
-	else
-		ereport(elevel,
-				(errmsg("vacuuming \"%s.%s\"",
-						vacrel->relnamespace,
-						vacrel->relname)));
-
+	aggressive = vacrel->aggressive;
 	nblocks = RelationGetNumberOfBlocks(vacrel->rel);
 	next_unskippable_block = 0;
 	next_failsafe_block = 0;
 	next_fsm_block_to_vacuum = 0;
 	vacrel->rel_pages = nblocks;
 	vacrel->scanned_pages = 0;
-	vacrel->pinskipped_pages = 0;
 	vacrel->frozenskipped_pages = 0;
-	vacrel->tupcount_pages = 0;
 	vacrel->pages_removed = 0;
 	vacrel->lpdead_item_pages = 0;
 	vacrel->nonempty_pages = 0;
@@ -947,6 +941,17 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 	vacrel->indstats = (IndexBulkDeleteResult **)
 		palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
 
+	if (aggressive)
+		ereport(elevel,
+				(errmsg("aggressively vacuuming \"%s.%s\"",
+						vacrel->relnamespace,
+						vacrel->relname)));
+	else
+		ereport(elevel,
+				(errmsg("vacuuming \"%s.%s\"",
+						vacrel->relnamespace,
+						vacrel->relname)));
+
 	/*
 	 * Do failsafe precheck before calling dead_items_alloc.  This ensures
 	 * that parallel VACUUM won't be attempted when relfrozenxid is already
@@ -1002,15 +1007,6 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 	 * just added to that page are necessarily newer than the GlobalXmin we
 	 * computed, so they'll have no effect on the value to which we can safely
 	 * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
-	 *
-	 * We will scan the table's last page, at least to the extent of
-	 * determining whether it has tuples or not, even if it should be skipped
-	 * according to the above rules; except when we've already determined that
-	 * it's not worth trying to truncate the table.  This avoids having
-	 * lazy_truncate_heap() take access-exclusive lock on the table to attempt
-	 * a truncation that just fails immediately because there are tuples in
-	 * the last page.  This is worth avoiding mainly because such a lock must
-	 * be replayed on any hot standby, where it can be disruptive.
 	 */
 	if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
 	{
@@ -1048,18 +1044,14 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 		bool		all_visible_according_to_vm = false;
 		LVPagePruneState prunestate;
 
-		/*
-		 * Consider need to skip blocks.  See note above about forcing
-		 * scanning of last page.
-		 */
-#define FORCE_CHECK_PAGE() \
-		(blkno == nblocks - 1 && should_attempt_truncation(vacrel))
-
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
 								 blkno, InvalidOffsetNumber);
 
+		/*
+		 * Consider need to skip blocks
+		 */
 		if (blkno == next_unskippable_block)
 		{
 			/* Time to advance next_unskippable_block */
@@ -1108,13 +1100,19 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 		else
 		{
 			/*
-			 * The current block is potentially skippable; if we've seen a
-			 * long enough run of skippable blocks to justify skipping it, and
-			 * we're not forced to check it, then go ahead and skip.
-			 * Otherwise, the page must be at least all-visible if not
-			 * all-frozen, so we can set all_visible_according_to_vm = true.
+			 * The current block can be skipped if we've seen a long enough
+			 * run of skippable blocks to justify skipping it.
+			 *
+			 * There is an exception: we will scan the table's last page to
+			 * determine whether it has tuples or not, even if it would
+			 * otherwise be skipped (unless it's clearly not worth trying to
+			 * truncate the table).  This avoids having lazy_truncate_heap()
+			 * take access-exclusive lock on the table to attempt a truncation
+			 * that just fails immediately because there are tuples in the
+			 * last page.
 			 */
-			if (skipping_blocks && !FORCE_CHECK_PAGE())
+			if (skipping_blocks &&
+				!(blkno == nblocks - 1 && should_attempt_truncation(vacrel)))
 			{
 				/*
 				 * Tricky, tricky.  If this is in aggressive vacuum, the page
@@ -1124,12 +1122,22 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 				 * case, or else we'll think we can't update relfrozenxid and
 				 * relminmxid.  If it's not an aggressive vacuum, we don't
 				 * know whether it was all-frozen, so we have to recheck; but
-				 * in this case an approximate answer is OK.
+				 * in this case an approximate answer is still correct.
+				 *
+				 * (We really don't want to miss out on the opportunity to
+				 * advance relfrozenxid in a non-aggressive vacuum, but this
+				 * edge case shouldn't make that appreciably less likely in
+				 * practice.)
 				 */
 				if (aggressive || VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
 					vacrel->frozenskipped_pages++;
 				continue;
 			}
+
+			/*
+			 * Otherwise, the page must be at least all-visible if not
+			 * all-frozen, so we can set all_visible_according_to_vm = true
+			 */
 			all_visible_according_to_vm = true;
 		}
 
@@ -1154,7 +1162,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 		 * Consider if we definitely have enough space to process TIDs on page
 		 * already.  If we are close to overrunning the available space for
 		 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
-		 * this page.
+		 * this page.  Must do this before calling lazy_scan_prune (or before
+		 * calling lazy_scan_noprune).
 		 */
 		Assert(dead_items->max_items >= MaxHeapTuplesPerPage);
 		if (dead_items->max_items - dead_items->num_items < MaxHeapTuplesPerPage)
@@ -1189,7 +1198,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 		}
 
 		/*
-		 * Set up visibility map page as needed.
+		 * Set up visibility map page as needed, and pin the heap page that
+		 * we're going to scan.
 		 *
 		 * Pin the visibility map page in case we need to mark the page
 		 * all-visible.  In most cases this will be very cheap, because we'll
@@ -1202,156 +1212,52 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 
 		buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
 								 RBM_NORMAL, vacrel->bstrategy);
+		page = BufferGetPage(buf);
+		vacrel->scanned_pages++;
 
 		/*
-		 * We need buffer cleanup lock so that we can prune HOT chains and
-		 * defragment the page.
+		 * We need a buffer cleanup lock to prune HOT chains and defragment
+		 * the page in lazy_scan_prune.  But when it's not possible to acquire
+		 * a cleanup lock right away, we may be able to settle for reduced
+		 * processing in lazy_scan_noprune.
 		 */
 		if (!ConditionalLockBufferForCleanup(buf))
 		{
 			bool		hastup;
 
-			/*
-			 * If we're not performing an aggressive scan to guard against XID
-			 * wraparound, and we don't want to forcibly check the page, then
-			 * it's OK to skip vacuuming pages we get a lock conflict on. They
-			 * will be dealt with in some future vacuum.
-			 */
-			if (!aggressive && !FORCE_CHECK_PAGE())
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+			/* Check for new or empty pages before lazy_scan_noprune call */
+			if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
+									   vmbuffer))
 			{
-				ReleaseBuffer(buf);
-				vacrel->pinskipped_pages++;
+				/* Lock and pin released for us */
+				continue;
+			}
+
+			if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup))
+			{
+				/* No need to wait for cleanup lock for this page */
+				UnlockReleaseBuffer(buf);
+				if (hastup)
+					vacrel->nonempty_pages = blkno + 1;
 				continue;
 			}
 
 			/*
-			 * Read the page with share lock to see if any xids on it need to
-			 * be frozen.  If not we just skip the page, after updating our
-			 * scan statistics.  If there are some, we wait for cleanup lock.
-			 *
-			 * We could defer the lock request further by remembering the page
-			 * and coming back to it later, or we could even register
-			 * ourselves for multiple buffers and then service whichever one
-			 * is received first.  For now, this seems good enough.
-			 *
-			 * If we get here with aggressive false, then we're just forcibly
-			 * checking the page, and so we don't want to insist on getting
-			 * the lock; we only need to know if the page contains tuples, so
-			 * that we can update nonempty_pages correctly.  It's convenient
-			 * to use lazy_check_needs_freeze() for both situations, though.
+			 * lazy_scan_noprune could not do all required processing without
+			 * a cleanup lock.  Wait for a cleanup lock, and then proceed to
+			 * lazy_scan_prune to perform ordinary pruning and freezing.
 			 */
-			LockBuffer(buf, BUFFER_LOCK_SHARE);
-			if (!lazy_check_needs_freeze(buf, &hastup, vacrel))
-			{
-				UnlockReleaseBuffer(buf);
-				vacrel->scanned_pages++;
-				vacrel->pinskipped_pages++;
-				if (hastup)
-					vacrel->nonempty_pages = blkno + 1;
-				continue;
-			}
-			if (!aggressive)
-			{
-				/*
-				 * Here, we must not advance scanned_pages; that would amount
-				 * to claiming that the page contains no freezable tuples.
-				 */
-				UnlockReleaseBuffer(buf);
-				vacrel->pinskipped_pages++;
-				if (hastup)
-					vacrel->nonempty_pages = blkno + 1;
-				continue;
-			}
+			Assert(vacrel->aggressive);
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			LockBufferForCleanup(buf);
-			/* drop through to normal processing */
 		}
 
-		/*
-		 * By here we definitely have enough dead_items space for whatever
-		 * LP_DEAD tids are on this page, we have the visibility map page set
-		 * up in case we need to set this page's all_visible/all_frozen bit,
-		 * and we have a cleanup lock.  Any tuples on this page are now sure
-		 * to be "counted" by this VACUUM.
-		 *
-		 * One last piece of preamble needs to take place before we can prune:
-		 * we need to consider new and empty pages.
-		 */
-		vacrel->scanned_pages++;
-		vacrel->tupcount_pages++;
-
-		page = BufferGetPage(buf);
-
-		if (PageIsNew(page))
+		/* Check for new or empty pages before lazy_scan_prune call */
+		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
 		{
-			/*
-			 * All-zeroes pages can be left over if either a backend extends
-			 * the relation by a single page, but crashes before the newly
-			 * initialized page has been written out, or when bulk-extending
-			 * the relation (which creates a number of empty pages at the tail
-			 * end of the relation, but enters them into the FSM).
-			 *
-			 * Note we do not enter the page into the visibilitymap. That has
-			 * the downside that we repeatedly visit this page in subsequent
-			 * vacuums, but otherwise we'll never not discover the space on a
-			 * promoted standby. The harm of repeated checking ought to
-			 * normally not be too bad - the space usually should be used at
-			 * some point, otherwise there wouldn't be any regular vacuums.
-			 *
-			 * Make sure these pages are in the FSM, to ensure they can be
-			 * reused. Do that by testing if there's any space recorded for
-			 * the page. If not, enter it. We do so after releasing the lock
-			 * on the heap page, the FSM is approximate, after all.
-			 */
-			UnlockReleaseBuffer(buf);
-
-			if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
-			{
-				Size		freespace = BLCKSZ - SizeOfPageHeaderData;
-
-				RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-			}
-			continue;
-		}
-
-		if (PageIsEmpty(page))
-		{
-			Size		freespace = PageGetHeapFreeSpace(page);
-
-			/*
-			 * Empty pages are always all-visible and all-frozen (note that
-			 * the same is currently not true for new pages, see above).
-			 */
-			if (!PageIsAllVisible(page))
-			{
-				START_CRIT_SECTION();
-
-				/* mark buffer dirty before writing a WAL record */
-				MarkBufferDirty(buf);
-
-				/*
-				 * It's possible that another backend has extended the heap,
-				 * initialized the page, and then failed to WAL-log the page
-				 * due to an ERROR.  Since heap extension is not WAL-logged,
-				 * recovery might try to replay our record setting the page
-				 * all-visible and find that the page isn't initialized, which
-				 * will cause a PANIC.  To prevent that, check whether the
-				 * page has been previously WAL-logged, and if not, do that
-				 * now.
-				 */
-				if (RelationNeedsWAL(vacrel->rel) &&
-					PageGetLSN(page) == InvalidXLogRecPtr)
-					log_newpage_buffer(buf, true);
-
-				PageSetAllVisible(page);
-				visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-								  vmbuffer, InvalidTransactionId,
-								  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
-				END_CRIT_SECTION();
-			}
-
-			UnlockReleaseBuffer(buf);
-			RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+			/* Lock and pin released for us */
 			continue;
 		}
 
@@ -1564,7 +1470,7 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 
 	/* now we can compute the new value for pg_class.reltuples */
 	vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
-													 vacrel->tupcount_pages,
+													 vacrel->scanned_pages,
 													 vacrel->live_tuples);
 
 	/*
@@ -1637,14 +1543,10 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 	appendStringInfo(&buf,
 					 _("%lld dead row versions cannot be removed yet, oldest xmin: %u\n"),
 					 (long long) vacrel->new_dead_tuples, vacrel->OldestXmin);
-	appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
-									"Skipped %u pages due to buffer pins, ",
-									vacrel->pinskipped_pages),
-					 vacrel->pinskipped_pages);
-	appendStringInfo(&buf, ngettext("%u frozen page.\n",
-									"%u frozen pages.\n",
-									vacrel->frozenskipped_pages),
-					 vacrel->frozenskipped_pages);
+	appendStringInfo(&buf, ngettext("%u page skipped using visibility map.\n",
+									"%u pages skipped using visibility map.\n",
+									vacrel->rel_pages - vacrel->scanned_pages),
+					 vacrel->rel_pages - vacrel->scanned_pages);
 	appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
 
 	ereport(elevel,
@@ -1658,6 +1560,132 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
 	pfree(buf.data);
 }
 
+/*
+ *	lazy_scan_new_or_empty() -- lazy_scan_heap() new/empty page handling.
+ *
+ * Must call here to handle both new and empty pages before calling
+ * lazy_scan_prune or lazy_scan_noprune, since they're not prepared to deal
+ * with new or empty pages.
+ *
+ * It's necessary to consider new pages as a special case, since the rules for
+ * maintaining the visibility map and FSM with empty pages are a little
+ * different (though new pages can be truncated based on the usual rules).
+ *
+ * Empty pages are not really a special case -- they're just heap pages that
+ * have no allocated tuples (including even LP_UNUSED items).  You might
+ * wonder why we need to handle them here all the same.  It's only necessary
+ * because of a rare corner-case involving a hard crash during heap relation
+ * extension.  If we ever make relation-extension crash safe, then it should
+ * no longer be necessary to deal with empty pages here (or new pages, for
+ * that matter).
+ *
+ * Caller can either hold a buffer cleanup lock on the buffer, or a simple
+ * shared lock.
+ */
+static bool
+lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
+					   Page page, bool sharelock, Buffer vmbuffer)
+{
+	Size		freespace;
+
+	if (PageIsNew(page))
+	{
+		/*
+		 * All-zeroes pages can be left over if either a backend extends the
+		 * relation by a single page, but crashes before the newly initialized
+		 * page has been written out, or when bulk-extending the relation
+		 * (which creates a number of empty pages at the tail end of the
+		 * relation, but enters them into the FSM).
+		 *
+		 * Note we do not enter the page into the visibilitymap. That has the
+		 * downside that we repeatedly visit this page in subsequent vacuums,
+		 * but otherwise we'll never discover the space on a promoted
+		 * standby. The harm of repeated checking ought to normally not be too
+		 * bad - the space usually should be used at some point, otherwise
+		 * there wouldn't be any regular vacuums.
+		 *
+		 * Make sure these pages are in the FSM, to ensure they can be reused.
+		 * Do that by testing if there's any space recorded for the page. If
+		 * not, enter it. We do so after releasing the lock on the heap page,
+		 * the FSM is approximate, after all.
+		 */
+		UnlockReleaseBuffer(buf);
+
+		if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
+		{
+			freespace = BLCKSZ - SizeOfPageHeaderData;
+
+			RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+		}
+
+		return true;
+	}
+
+	if (PageIsEmpty(page))
+	{
+		/*
+		 * It seems likely that caller will always be able to get a cleanup
+		 * lock on an empty page.  But don't take any chances -- escalate to
+		 * an exclusive lock (still don't need a cleanup lock, though).
+		 */
+		if (sharelock)
+		{
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+			if (!PageIsEmpty(page))
+			{
+				/* page isn't new or empty -- keep lock and pin for now */
+				return false;
+			}
+		}
+		else
+		{
+			/* Already have a full cleanup lock (which is more than enough) */
+		}
+
+		freespace = PageGetHeapFreeSpace(page);
+
+		/*
+		 * Unlike new pages, empty pages are always set all-visible and
+		 * all-frozen.
+		 */
+		if (!PageIsAllVisible(page))
+		{
+			START_CRIT_SECTION();
+
+			/* mark buffer dirty before writing a WAL record */
+			MarkBufferDirty(buf);
+
+			/*
+			 * It's possible that another backend has extended the heap,
+			 * initialized the page, and then failed to WAL-log the page due
+			 * to an ERROR.  Since heap extension is not WAL-logged, recovery
+			 * might try to replay our record setting the page all-visible and
+			 * find that the page isn't initialized, which will cause a PANIC.
+			 * To prevent that, check whether the page has been previously
+			 * WAL-logged, and if not, do that now.
+			 */
+			if (RelationNeedsWAL(vacrel->rel) &&
+				PageGetLSN(page) == InvalidXLogRecPtr)
+				log_newpage_buffer(buf, true);
+
+			PageSetAllVisible(page);
+			visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
+							  vmbuffer, InvalidTransactionId,
+							  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+			END_CRIT_SECTION();
+		}
+
+		UnlockReleaseBuffer(buf);
+		RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
+		return true;
+	}
+
+	/* page isn't new or empty -- keep lock and pin */
+	return false;
+}
+
 /*
  *	lazy_scan_prune() -- lazy_scan_heap() pruning and freezing.
  *
@@ -1764,10 +1792,9 @@ retry:
 		 * LP_DEAD items are processed outside of the loop.
 		 *
 		 * Note that we deliberately don't set hastup=true in the case of an
-		 * LP_DEAD item here, which is not how lazy_check_needs_freeze() or
-		 * count_nondeletable_pages() do it -- they only consider pages empty
-		 * when they only have LP_UNUSED items, which is important for
-		 * correctness.
+		 * LP_DEAD item here, which is not how count_nondeletable_pages() does
+		 * it -- it only considers pages empty/truncatable when they have no
+		 * items at all (except LP_UNUSED items).
 		 *
 		 * Our assumption is that any LP_DEAD items we encounter here will
 		 * become LP_UNUSED inside lazy_vacuum_heap_page() before we actually
@@ -2054,6 +2081,236 @@ retry:
 	vacrel->live_tuples += live_tuples;
 }
 
+/*
+ *	lazy_scan_noprune() -- lazy_scan_prune() variant without pruning
+ *
+ * Caller need only hold a pin and share lock on the buffer, unlike
+ * lazy_scan_prune, which requires a full cleanup lock.
+ *
+ * While pruning isn't performed here, we can at least collect existing
+ * LP_DEAD items into the dead_items array for removal from indexes.  It's
+ * quite possible that earlier opportunistic pruning left LP_DEAD items
+ * behind, and we shouldn't miss out on an opportunity to make them reusable
+ * (VACUUM alone is capable of cleaning up line pointer bloat like this).
+ * Note that we'll only require an exclusive lock (not a cleanup lock) later
+ * on when we set these LP_DEAD items to LP_UNUSED in lazy_vacuum_heap_page.
+ *
+ * Freezing isn't performed here either.  For aggressive VACUUM callers, we
+ * may return false to indicate that a full cleanup lock is required.  This is
+ * necessary because pruning requires a cleanup lock, and because VACUUM
+ * cannot freeze a page's tuples until after pruning takes place (freezing
+ * tuples effectively requires a cleanup lock, though we don't need a cleanup
+ * lock in lazy_vacuum_heap_page or in lazy_scan_new_or_empty to set a heap
+ * page all-frozen in the visibility map).
+ *
+ * We'll always return true for a non-aggressive VACUUM, even when we know
+ * that this will cause it to miss out on freezing tuples from before the
+ * vacrel->FreezeLimit cutoff -- a non-aggressive VACUUM should never have
+ * to wait for a cleanup lock.  This does mean that it definitely won't be
+ * able to advance relfrozenxid opportunistically (the same applies to
+ * vacrel->MultiXactCutoff and relminmxid).
+ *
+ * See lazy_scan_prune for an explanation of hastup return flag.
+ */
+static bool
+lazy_scan_noprune(LVRelState *vacrel,
+				  Buffer buf,
+				  BlockNumber blkno,
+				  Page page,
+				  bool *hastup)
+{
+	OffsetNumber offnum,
+				maxoff;
+	bool		has_tuple_needs_freeze = false;
+	int			lpdead_items,
+				num_tuples,
+				live_tuples,
+				new_dead_tuples;
+	HeapTupleHeader tupleheader;
+	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
+
+	*hastup = false;			/* for now */
+
+	lpdead_items = 0;
+	num_tuples = 0;
+	live_tuples = 0;
+	new_dead_tuples = 0;
+
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (offnum = FirstOffsetNumber;
+		 offnum <= maxoff;
+		 offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		itemid;
+		HeapTupleData tuple;
+
+		vacrel->offnum = offnum;
+		itemid = PageGetItemId(page, offnum);
+
+		if (!ItemIdIsUsed(itemid))
+			continue;
+
+		if (ItemIdIsRedirected(itemid))
+		{
+			*hastup = true;		/* page won't be truncatable */
+			continue;
+		}
+
+		if (ItemIdIsDead(itemid))
+		{
+			/*
+			 * Deliberately don't set hastup=true here.  See same point in
+			 * lazy_scan_prune for an explanation.
+			 */
+			deadoffsets[lpdead_items++] = offnum;
+			continue;
+		}
+
+		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
+		if (!has_tuple_needs_freeze &&
+			heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit,
+									vacrel->MultiXactCutoff, buf))
+		{
+			if (vacrel->aggressive)
+			{
+				/* Going to have to get cleanup lock for lazy_scan_prune */
+				vacrel->offnum = InvalidOffsetNumber;
+				return false;
+			}
+
+			has_tuple_needs_freeze = true;
+		}
+
+		num_tuples++;
+		ItemPointerSet(&(tuple.t_self), blkno, offnum);
+		tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+		tuple.t_len = ItemIdGetLength(itemid);
+		tuple.t_tableOid = RelationGetRelid(vacrel->rel);
+
+		switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
+		{
+			case HEAPTUPLE_DELETE_IN_PROGRESS:
+			case HEAPTUPLE_LIVE:
+
+				/*
+				 * Count both cases as live, just like lazy_scan_prune
+				 */
+				live_tuples++;
+
+				break;
+			case HEAPTUPLE_DEAD:
+			case HEAPTUPLE_RECENTLY_DEAD:
+
+				/*
+				 * We count DEAD and RECENTLY_DEAD tuples in new_dead_tuples.
+				 *
+				 * lazy_scan_prune only does this for RECENTLY_DEAD tuples,
+				 * and never has to deal with DEAD tuples directly (they
+				 * reliably become LP_DEAD items through pruning).  Our
+				 * approach to DEAD tuples is a bit arbitrary, but it seems
+				 * better than totally ignoring them.
+				 */
+				new_dead_tuples++;
+				break;
+			case HEAPTUPLE_INSERT_IN_PROGRESS:
+
+				/*
+				 * Do not count these rows as live, just like lazy_scan_prune
+				 */
+				break;
+			default:
+				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
+				break;
+		}
+
+	}
+
+	vacrel->offnum = InvalidOffsetNumber;
+
+	if (has_tuple_needs_freeze)
+	{
+		/*
+		 * Current non-aggressive VACUUM operation definitely won't be able to
+		 * advance relfrozenxid or relminmxid
+		 */
+		Assert(!vacrel->aggressive);
+		vacrel->freeze_cutoffs_valid = false;
+	}
+
+	/*
+	 * Now save details of the LP_DEAD items from the page in the dead_items
+	 * array, but only if VACUUM uses the two-pass strategy
+	 */
+	if (vacrel->nindexes == 0)
+	{
+		/*
+		 * We are not prepared to handle the corner case where a single pass
+		 * strategy VACUUM cannot get a cleanup lock, and we then find LP_DEAD
+		 * items.  Repeat the same trick that we use for DEAD tuples: pretend
+		 * that they're RECENTLY_DEAD tuples.
+		 *
+		 * There is no fundamental reason why we must take the easy way out
+		 * like this.  Finding a way to make these LP_DEAD items get set to
+		 * LP_UNUSED would be less valuable and more complicated than it is in
+		 * the two-pass strategy case, since it would necessitate that we
+		 * repeat our lazy_scan_heap caller's page-at-a-time/one-pass-strategy
+		 * heap vacuuming steps.  Whereas in the two-pass strategy case,
+		 * lazy_vacuum_heap_rel will set the LP_DEAD items to LP_UNUSED. It
+		 * must always deal with things like remaining DEAD tuples with
+		 * storage, new LP_DEAD items that we didn't see earlier on, etc.
+		 */
+		if (lpdead_items > 0)
+			*hastup = true;		/* page won't be truncatable */
+		num_tuples += lpdead_items;
+		new_dead_tuples += lpdead_items;
+	}
+	else if (lpdead_items > 0)
+	{
+		LVDeadItems *dead_items = vacrel->dead_items;
+		ItemPointerData tmp;
+
+		vacrel->lpdead_item_pages++;
+
+		ItemPointerSetBlockNumber(&tmp, blkno);
+
+		for (int i = 0; i < lpdead_items; i++)
+		{
+			ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);
+			dead_items->items[dead_items->num_items++] = tmp;
+		}
+
+		Assert(dead_items->num_items <= dead_items->max_items);
+		pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,
+									 dead_items->num_items);
+
+		vacrel->lpdead_items += lpdead_items;
+	}
+	else
+	{
+		/*
+		 * We opt to skip FSM processing for the page on the grounds that it
+		 * is probably being modified by concurrent DML operations.  Seems
+		 * best to assume that the space is better left behind for future
+		 * updates of existing tuples.  This matches what opportunistic
+		 * pruning does.
+		 *
+		 * It's theoretically possible for us to set VM bits here too, but we
+		 * don't try that either.  It is highly unlikely to be possible, much
+		 * less useful.
+		 */
+	}
+
+	/*
+	 * Finally, add relevant page-local counts to whole-VACUUM counts
+	 */
+	vacrel->new_dead_tuples += new_dead_tuples;
+	vacrel->num_tuples += num_tuples;
+	vacrel->live_tuples += live_tuples;
+
+	/* Caller won't need to call lazy_scan_prune with same page */
+	return true;
+}
+
 /*
  * Remove the collected garbage tuples from the table and its indexes.
  *
@@ -2500,67 +2757,6 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 	return index;
 }
 
-/*
- *	lazy_check_needs_freeze() -- scan page to see if any tuples
- *					 need to be cleaned to avoid wraparound
- *
- * Returns true if the page needs to be vacuumed using cleanup lock.
- * Also returns a flag indicating whether page contains any tuples at all.
- */
-static bool
-lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel)
-{
-	Page		page = BufferGetPage(buf);
-	OffsetNumber offnum,
-				maxoff;
-	HeapTupleHeader tupleheader;
-
-	*hastup = false;
-
-	/*
-	 * New and empty pages, obviously, don't contain tuples. We could make
-	 * sure that the page is registered in the FSM, but it doesn't seem worth
-	 * waiting for a cleanup lock just for that, especially because it's
-	 * likely that the pin holder will do so.
-	 */
-	if (PageIsNew(page) || PageIsEmpty(page))
-		return false;
-
-	maxoff = PageGetMaxOffsetNumber(page);
-	for (offnum = FirstOffsetNumber;
-		 offnum <= maxoff;
-		 offnum = OffsetNumberNext(offnum))
-	{
-		ItemId		itemid;
-
-		/*
-		 * Set the offset number so that we can display it along with any
-		 * error that occurred while processing this tuple.
-		 */
-		vacrel->offnum = offnum;
-		itemid = PageGetItemId(page, offnum);
-
-		/* this should match hastup test in count_nondeletable_pages() */
-		if (ItemIdIsUsed(itemid))
-			*hastup = true;
-
-		/* dead and redirect items never need freezing */
-		if (!ItemIdIsNormal(itemid))
-			continue;
-
-		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
-
-		if (heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit,
-									vacrel->MultiXactCutoff, buf))
-			break;
-	}							/* scan along page */
-
-	/* Clear the offset information once we have processed the given page. */
-	vacrel->offnum = InvalidOffsetNumber;
-
-	return (offnum <= maxoff);
-}
-
 /*
  * Trigger the failsafe to avoid wraparound failure when vacrel table has a
  * relfrozenxid and/or relminmxid that is dangerously far in the past.
@@ -2655,7 +2851,7 @@ do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel)
 	 */
 	vacrel->lps->lvshared->reltuples = vacrel->new_rel_tuples;
 	vacrel->lps->lvshared->estimated_count =
-		(vacrel->tupcount_pages < vacrel->rel_pages);
+		(vacrel->scanned_pages < vacrel->rel_pages);
 
 	/* Determine the number of parallel workers to launch */
 	if (vacrel->lps->lvshared->first_time)
@@ -2972,7 +3168,7 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 	{
 		double		reltuples = vacrel->new_rel_tuples;
 		bool		estimated_count =
-		vacrel->tupcount_pages < vacrel->rel_pages;
+		vacrel->scanned_pages < vacrel->rel_pages;
 
 		for (int idx = 0; idx < vacrel->nindexes; idx++)
 		{
@@ -3123,7 +3319,9 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
  * should_attempt_truncation - should we attempt to truncate the heap?
  *
  * Don't even think about it unless we have a shot at releasing a goodly
- * number of pages.  Otherwise, the time taken isn't worth it.
+ * number of pages.  Otherwise, the time taken isn't worth it, mainly because
+ * an AccessExclusive lock must be replayed on any hot standby, where it can
+ * be particularly disruptive.
  *
  * Also don't attempt it if wraparound failsafe is in effect.  It's hard to
  * predict how long lazy_truncate_heap will take.  Don't take any chances.
diff --git a/src/test/isolation/expected/vacuum-reltuples.out b/src/test/isolation/expected/vacuum-reltuples.out
index cdbe7f3a6..ce55376e7 100644
--- a/src/test/isolation/expected/vacuum-reltuples.out
+++ b/src/test/isolation/expected/vacuum-reltuples.out
@@ -45,7 +45,7 @@ step stats:
 
 relpages|reltuples
 --------+---------
-       1|       20
+       1|       21
 (1 row)
 
 
diff --git a/src/test/isolation/specs/vacuum-reltuples.spec b/src/test/isolation/specs/vacuum-reltuples.spec
index ae2f79b8f..a2a461f2f 100644
--- a/src/test/isolation/specs/vacuum-reltuples.spec
+++ b/src/test/isolation/specs/vacuum-reltuples.spec
@@ -2,9 +2,10 @@
 # to page pins. We absolutely need to avoid setting reltuples=0 in
 # such cases, since that interferes badly with planning.
 #
-# Expected result in second permutation is 20 tuples rather than 21 as
-# for the others, because vacuum should leave the previous result
-# (from before the insert) in place.
+# Expected result for all three permutations is 21 tuples, including
+# the second permutation.  VACUUM is able to count the concurrently
+# inserted tuple in its final reltuples, even when a cleanup lock
+# cannot be acquired on the affected heap page.
 
 setup {
     create table smalltbl
-- 
2.30.2

