On Mon, Dec 16, 2019 at 11:49:56AM +0900, Michael Paquier wrote:
> On Sun, Dec 15, 2019 at 10:27:12AM -0600, Justin Pryzby wrote:
> > I named it so because it calls both lazy_vacuum_index
> > ("PROGRESS_VACUUM_PHASE_VACUUM_INDEX") and
> > lazy_vacuum_heap("PROGRESS_VACUUM_PHASE_VACUUM_HEAP")
> > 
> > I suppose you don't think the other way around is better?
> > lazy_vacuum_index_heap
> 
> Dunno.  Let's see if others have other thoughts on the matter.  FWIW,
> I have a long history at naming things in a way others don't like :)

I renamed the function (it's now lazy_vacuum_index_and_heap, per patch 2).

I also deduplicated two more hunks into a second new function, skip_blocks
(patch 3).

(I'm also including the changes I mentioned here, in case anyone cares to
comment or review:
https://www.postgresql.org/message-id/20191220171132.GB30414%40telsasoft.com)
From 5317d9f3cee163762563f2255f1dd26800ea858b Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 27 Nov 2019 20:07:10 -0600
Subject: [PATCH v6 1/6] Rename buf to avoid shadowing buf of another type

---
 src/backend/access/heap/vacuumlazy.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index ab09d84..3f52278 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -517,7 +517,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	BlockNumber next_unskippable_block;
 	bool		skipping_blocks;
 	xl_heap_freeze_tuple *frozen;
-	StringInfoData buf;
+	StringInfoData sbuf;
 	const int	initprog_index[] = {
 		PROGRESS_VACUUM_PHASE,
 		PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
@@ -1479,33 +1479,33 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	 * This is pretty messy, but we split it up so that we can skip emitting
 	 * individual parts of the message when not applicable.
 	 */
-	initStringInfo(&buf);
-	appendStringInfo(&buf,
+	initStringInfo(&sbuf);
+	appendStringInfo(&sbuf,
 					 _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
 					 nkeep, OldestXmin);
-	appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"),
+	appendStringInfo(&sbuf, _("There were %.0f unused item identifiers.\n"),
 					 nunused);
-	appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
+	appendStringInfo(&sbuf, ngettext("Skipped %u page due to buffer pins, ",
 									"Skipped %u pages due to buffer pins, ",
 									vacrelstats->pinskipped_pages),
 					 vacrelstats->pinskipped_pages);
-	appendStringInfo(&buf, ngettext("%u frozen page.\n",
+	appendStringInfo(&sbuf, ngettext("%u frozen page.\n",
 									"%u frozen pages.\n",
 									vacrelstats->frozenskipped_pages),
 					 vacrelstats->frozenskipped_pages);
-	appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
+	appendStringInfo(&sbuf, ngettext("%u page is entirely empty.\n",
 									"%u pages are entirely empty.\n",
 									empty_pages),
 					 empty_pages);
-	appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
+	appendStringInfo(&sbuf, _("%s."), pg_rusage_show(&ru0));
 
 	ereport(elevel,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
 					RelationGetRelationName(onerel),
 					tups_vacuumed, num_tuples,
 					vacrelstats->scanned_pages, nblocks),
-			 errdetail_internal("%s", buf.data)));
-	pfree(buf.data);
+			 errdetail_internal("%s", sbuf.data)));
+	pfree(sbuf.data);
 }
 
 
-- 
2.7.4
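
To make the hazard concrete: in lazy_scan_heap, the function-scope
StringInfoData buf coexisted with loop-local "Buffer buf" declarations, so a
hunk moved across the loop boundary could silently rebind to the wrong one.
A standalone sketch of the problem, using stand-in typedefs rather than the
real backend types:

#include <stdio.h>

/* Stand-ins for the backend types, just enough to show the shadowing */
typedef struct
{
	char	   *data;
} StringInfoData;
typedef int Buffer;

static void
scan_sketch(void)
{
	StringInfoData buf;			/* function-scope message buffer */
	int			blkno;

	buf.data = "vacuum summary";
	for (blkno = 0; blkno < 2; blkno++)
	{
		Buffer		buf = blkno;	/* shadows the outer "buf" */

		printf("inner buf is block %d\n", buf);
	}
	/* renaming the outer variable to sbuf removes the ambiguity */
	printf("outer buf says: %s\n", buf.data);
}

int
main(void)
{
	scan_sketch();
	return 0;
}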

From 8fdee2073e083388865f52874a672e8cc47f7c51 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 19:57:25 -0600
Subject: [PATCH v6 2/6] Deduplicate index and heap vacuuming code

---
 src/backend/access/heap/vacuumlazy.c | 111 ++++++++++++++++++----------------
 1 file changed, 50 insertions(+), 61 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 3f52278..c6dc44b 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -153,6 +153,9 @@ static BufferAccessStrategy vac_strategy;
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
 						   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
 						   bool aggressive);
+static void lazy_vacuum_index_and_heap(Relation onerel, LVRelStats *vacrelstats,
+									   Relation *Irel, int nindexes,
+									   IndexBulkDeleteResult **indstats);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_index(Relation indrel,
@@ -740,12 +741,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
 			vacrelstats->num_dead_tuples > 0)
 		{
-			const int	hvp_index[] = {
-				PROGRESS_VACUUM_PHASE,
-				PROGRESS_VACUUM_NUM_INDEX_VACUUMS
-			};
-			int64		hvp_val[2];
-
 			/*
 			 * Before beginning index vacuuming, we release any pin we may
 			 * hold on the visibility map page.  This isn't necessary for
@@ -758,31 +753,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				vmbuffer = InvalidBuffer;
 			}
 
-			/* Log cleanup info before we touch indexes */
-			vacuum_log_cleanup_info(onerel, vacrelstats);
-
-			/* Report that we are now vacuuming indexes */
-			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
-										 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
-
-			/* Remove index entries */
-			for (i = 0; i < nindexes; i++)
-				lazy_vacuum_index(Irel[i],
-								  &indstats[i],
-								  vacrelstats);
-
-			/*
-			 * Report that we are now vacuuming the heap.  We also increase
-			 * the number of index scans here; note that by using
-			 * pgstat_progress_update_multi_param we can update both
-			 * parameters atomically.
-			 */
-			hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
-			hvp_val[1] = vacrelstats->num_index_scans + 1;
-			pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
-
-			/* Remove tuples from heap */
-			lazy_vacuum_heap(onerel, vacrelstats);
+			lazy_vacuum_index_and_heap(onerel, vacrelstats, Irel, nindexes,
+									   indstats);
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -790,7 +762,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			 * valid.
 			 */
 			vacrelstats->num_dead_tuples = 0;
-			vacrelstats->num_index_scans++;
 
 			/*
 			 * Vacuum the Free Space Map to make newly-freed space visible on
@@ -1418,35 +1389,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 	/* If any tuples need to be deleted, perform final vacuum cycle */
 	/* XXX put a threshold on min number of tuples here? */
-	if (vacrelstats->num_dead_tuples > 0)
-	{
-		const int	hvp_index[] = {
-			PROGRESS_VACUUM_PHASE,
-			PROGRESS_VACUUM_NUM_INDEX_VACUUMS
-		};
-		int64		hvp_val[2];
-
-		/* Log cleanup info before we touch indexes */
-		vacuum_log_cleanup_info(onerel, vacrelstats);
-
-		/* Report that we are now vacuuming indexes */
-		pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
-									 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
-
-		/* Remove index entries */
-		for (i = 0; i < nindexes; i++)
-			lazy_vacuum_index(Irel[i],
-							  &indstats[i],
-							  vacrelstats);
-
-		/* Report that we are now vacuuming the heap */
-		hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
-		hvp_val[1] = vacrelstats->num_index_scans + 1;
-		pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
-
-		/* Remove tuples from heap */
-		lazy_vacuum_heap(onerel, vacrelstats);
-		vacrelstats->num_index_scans++;
+	if (vacrelstats->num_dead_tuples > 0)
+	{
+		lazy_vacuum_index_and_heap(onerel, vacrelstats, Irel, nindexes,
+								   indstats);
 	}
 
 	/*
@@ -1508,6 +1452,47 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pfree(sbuf.data);
 }
 
+static void
+lazy_vacuum_index_and_heap(Relation onerel, LVRelStats *vacrelstats,
+						   Relation *Irel, int nindexes,
+						   IndexBulkDeleteResult **indstats)
+{
+	const int	hvp_index[] = {
+		PROGRESS_VACUUM_PHASE,
+		PROGRESS_VACUUM_NUM_INDEX_VACUUMS
+	};
+	int64		hvp_val[2];
+	int			i;
+
+	/* Log cleanup info before we touch indexes */
+	vacuum_log_cleanup_info(onerel, vacrelstats);
+
+	/* Report that we are now vacuuming indexes */
+	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
+								 PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
+
+	/* Remove index entries */
+	for (i = 0; i < nindexes; i++)
+		lazy_vacuum_index(Irel[i],
+						  &indstats[i],
+						  vacrelstats);
+
+	/*
+	 * Report that we are now vacuuming the heap.  We also increase
+	 * the number of index scans here; note that by using
+	 * pgstat_progress_update_multi_param we can update both
+	 * parameters atomically.
+	 */
+	hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP;
+	hvp_val[1] = vacrelstats->num_index_scans + 1;
+	pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
+
+	/* Remove tuples from heap */
+	lazy_vacuum_heap(onerel, vacrelstats);
+
+	vacrelstats->num_index_scans++;
+}
+
 
 /*
  *	lazy_vacuum_heap() -- second pass over the heap
-- 
2.7.4
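
The helper also preserves the comment's atomicity argument: the phase and
the index-scan count still change together, so a session reading the
progress array never sees the new phase paired with the old counter.  A
standalone sketch of that property (a mutex stands in for the backend's
changecount protocol, and the slot numbers here are made up):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_PHASE			0	/* hypothetical slot numbers */
#define SLOT_INDEX_VACUUMS	1

static int64_t progress[2];
static pthread_mutex_t progress_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Update several progress slots under one lock, so a reader taking the
 * same lock can never observe a half-updated pair.
 */
static void
update_multi(int nparam, const int *index, const int64_t *val)
{
	int			i;

	pthread_mutex_lock(&progress_lock);
	for (i = 0; i < nparam; i++)
		progress[index[i]] = val[i];
	pthread_mutex_unlock(&progress_lock);
}

int
main(void)
{
	const int	idx[] = {SLOT_PHASE, SLOT_INDEX_VACUUMS};
	const int64_t val[] = {3, 1};	/* made-up phase code, one index scan */

	update_multi(2, idx, val);
	printf("phase=%lld scans=%lld\n",
		   (long long) progress[SLOT_PHASE],
		   (long long) progress[SLOT_INDEX_VACUUMS]);
	return 0;
}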

From 8d9c5808a936032dfca90ed71ab34d8a391601f7 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Mon, 23 Dec 2019 14:38:01 -0600
Subject: [PATCH v6 3/6] Deduplicate page-skipping logic into skip_blocks()

---
 src/backend/access/heap/vacuumlazy.c | 193 ++++++++++++++++------------------
 1 file changed, 90 insertions(+), 103 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index c6dc44b..98a3598 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -478,6 +478,91 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 }
 
 /*
+ * Decide whether to skip pages: advance *next_unskippable_block past
+ * skippable pages and return true if at least SKIP_PAGES_THRESHOLD
+ * consecutive pages starting at blkno can be skipped.
+ * Except when aggressive is set, we want to skip pages that are
+ * all-visible according to the visibility map, but only when we can skip
+ * at least SKIP_PAGES_THRESHOLD consecutive pages.  Since we're reading
+ * sequentially, the OS should be doing readahead for us, so there's no
+ * gain in skipping a page now and then; that's likely to disable
+ * readahead and so be counterproductive. Also, skipping even a single
+ * page means that we can't update relfrozenxid, so we only want to do it
+ * if we can skip a goodly number of pages.
+ *
+ * When aggressive is set, we can't skip pages just because they are
+ * all-visible, but we can still skip pages that are all-frozen, since
+ * such pages do not need freezing and do not affect the value that we can
+ * safely set for relfrozenxid or relminmxid.
+ *
+ * Before entering the main loop, establish the invariant that
+ * next_unskippable_block is the next block number >= blkno that we can't
+ * skip based on the visibility map, either all-visible for a regular scan
+ * or all-frozen for an aggressive scan.  We set it to nblocks if there's
+ * no such block.  The caller sets its skipping_blocks flag from our
+ * return value.
+ *
+ * Note: The value returned by visibilitymap_get_status could be slightly
+ * out-of-date, since we make this test before reading the corresponding
+ * heap page or locking the buffer.  This is OK.  If we mistakenly think
+ * that the page is all-visible or all-frozen when in fact the flag's just
+ * been cleared, we might fail to vacuum the page.  It's easy to see that
+ * skipping a page when aggressive is not set is not a very big deal; we
+ * might leave some dead tuples lying around, but the next vacuum will
+ * find them.  But even when aggressive *is* set, it's still OK if we miss
+ * a page whose all-frozen marking has just been cleared.  Any new XIDs
+ * just added to that page are necessarily newer than the GlobalXmin we
+ * computed, so they'll have no effect on the value to which we can safely
+ * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
+ *
+ * We will scan the table's last page, at least to the extent of
+ * determining whether it has tuples or not, even if it should be skipped
+ * according to the above rules; except when we've already determined that
+ * it's not worth trying to truncate the table.  This avoids having
+ * lazy_truncate_heap() take access-exclusive lock on the table to attempt
+ * a truncation that just fails immediately because there are tuples in
+ * the last page.  This is worth avoiding mainly because such a lock must
+ * be replayed on any hot standby, where it can be disruptive.
+ */
+static bool
+skip_blocks(Relation onerel, VacuumParams *params, BlockNumber blkno,
+			BlockNumber *next_unskippable_block, BlockNumber nblocks,
+			Buffer *vmbuffer, bool aggressive)
+{
+	if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
+	{
+		while (*next_unskippable_block < nblocks)
+		{
+			uint8		vmstatus;
+
+			vmstatus = visibilitymap_get_status(onerel, *next_unskippable_block,
+												vmbuffer);
+			if (aggressive)
+			{
+				if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
+					break;
+			}
+			else
+			{
+				if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
+					break;
+			}
+			vacuum_delay_point();
+			++*next_unskippable_block;
+		}
+	}
+
+	/*
+	 * We know we can't skip the current block.  But set up
+	 * skipping_blocks to do the right thing at the following blocks.
+	 */
+	if (*next_unskippable_block - blkno >= SKIP_PAGES_THRESHOLD)
+		return true;
+	else
+		return false;
+}
+
+/*
  *	lazy_scan_heap() -- scan an open heap relation
  *
  *		This routine prunes each page in the heap, which will among other
@@ -563,78 +645,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	initprog_val[2] = vacrelstats->max_dead_tuples;
 	pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
 
-	/*
-	 * Except when aggressive is set, we want to skip pages that are
-	 * all-visible according to the visibility map, but only when we can skip
-	 * at least SKIP_PAGES_THRESHOLD consecutive pages.  Since we're reading
-	 * sequentially, the OS should be doing readahead for us, so there's no
-	 * gain in skipping a page now and then; that's likely to disable
-	 * readahead and so be counterproductive. Also, skipping even a single
-	 * page means that we can't update relfrozenxid, so we only want to do it
-	 * if we can skip a goodly number of pages.
-	 *
-	 * When aggressive is set, we can't skip pages just because they are
-	 * all-visible, but we can still skip pages that are all-frozen, since
-	 * such pages do not need freezing and do not affect the value that we can
-	 * safely set for relfrozenxid or relminmxid.
-	 *
-	 * Before entering the main loop, establish the invariant that
-	 * next_unskippable_block is the next block number >= blkno that we can't
-	 * skip based on the visibility map, either all-visible for a regular scan
-	 * or all-frozen for an aggressive scan.  We set it to nblocks if there's
-	 * no such block.  We also set up the skipping_blocks flag correctly at
-	 * this stage.
-	 *
-	 * Note: The value returned by visibilitymap_get_status could be slightly
-	 * out-of-date, since we make this test before reading the corresponding
-	 * heap page or locking the buffer.  This is OK.  If we mistakenly think
-	 * that the page is all-visible or all-frozen when in fact the flag's just
-	 * been cleared, we might fail to vacuum the page.  It's easy to see that
-	 * skipping a page when aggressive is not set is not a very big deal; we
-	 * might leave some dead tuples lying around, but the next vacuum will
-	 * find them.  But even when aggressive *is* set, it's still OK if we miss
-	 * a page whose all-frozen marking has just been cleared.  Any new XIDs
-	 * just added to that page are necessarily newer than the GlobalXmin we
-	 * computed, so they'll have no effect on the value to which we can safely
-	 * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
-	 *
-	 * We will scan the table's last page, at least to the extent of
-	 * determining whether it has tuples or not, even if it should be skipped
-	 * according to the above rules; except when we've already determined that
-	 * it's not worth trying to truncate the table.  This avoids having
-	 * lazy_truncate_heap() take access-exclusive lock on the table to attempt
-	 * a truncation that just fails immediately because there are tuples in
-	 * the last page.  This is worth avoiding mainly because such a lock must
-	 * be replayed on any hot standby, where it can be disruptive.
-	 */
 	next_unskippable_block = 0;
-	if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
-	{
-		while (next_unskippable_block < nblocks)
-		{
-			uint8		vmstatus;
-
-			vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
-												&vmbuffer);
-			if (aggressive)
-			{
-				if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
-					break;
-			}
-			else
-			{
-				if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
-					break;
-			}
-			vacuum_delay_point();
-			next_unskippable_block++;
-		}
-	}
-
-	if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
-		skipping_blocks = true;
-	else
-		skipping_blocks = false;
+	skipping_blocks = skip_blocks(onerel, params, 0, &next_unskippable_block,
+								  nblocks, &vmbuffer, aggressive);
 
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
@@ -663,38 +675,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		{
 			/* Time to advance next_unskippable_block */
 			next_unskippable_block++;
-			if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
-			{
-				while (next_unskippable_block < nblocks)
-				{
-					uint8		vmskipflags;
-
-					vmskipflags = visibilitymap_get_status(onerel,
-														   next_unskippable_block,
-														   &vmbuffer);
-					if (aggressive)
-					{
-						if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
-							break;
-					}
-					else
-					{
-						if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
-							break;
-					}
-					vacuum_delay_point();
-					next_unskippable_block++;
-				}
-			}
-
-			/*
-			 * We know we can't skip the current block.  But set up
-			 * skipping_blocks to do the right thing at the following blocks.
-			 */
-			if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD)
-				skipping_blocks = true;
-			else
-				skipping_blocks = false;
+			skipping_blocks = skip_blocks(onerel, params, blkno,
+										  &next_unskippable_block, nblocks,
+										  &vmbuffer, aggressive);
 
 			/*
 			 * Normally, the fact that we can't skip this block must mean that
-- 
2.7.4
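
Note that skip_blocks() takes blkno so that the threshold applies to the
length of the skippable run rather than to an absolute block number; the
in-loop caller previously tested "next_unskippable_block - blkno", and
dropping the subtraction would make vacuum treat any run past block 31 as
long enough.  Standalone arithmetic sketch of the difference:

#include <stdio.h>

#define SKIP_PAGES_THRESHOLD	((unsigned) 32)	/* same value as vacuumlazy.c */

int
main(void)
{
	unsigned	blkno = 1000;					/* current position */
	unsigned	next_unskippable_block = 1010;	/* only a 10-page run */

	/* absolute comparison: true for any run once past block 31 */
	printf("absolute: %d\n", next_unskippable_block >= SKIP_PAGES_THRESHOLD);

	/* run-length comparison: 10 < 32, so the run is too short to skip */
	printf("run: %d\n",
		   next_unskippable_block - blkno >= SKIP_PAGES_THRESHOLD);
	return 0;
}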

From 3266d63a6c90aa4df781019311edba92ba5e11d8 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v6 4/6] vacuum errcontext to show block being processed

As requested here.
https://www.postgresql.org/message-id/20190807235154.erbmr4o4bo6vgnjv%40alap3.anarazel.de
---
 src/backend/access/heap/vacuumlazy.c | 37 ++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 98a3598..7d6282e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -138,6 +138,12 @@ typedef struct LVRelStats
 	bool		lock_waiter_detected;
 } LVRelStats;
 
+typedef struct
+{
+	char	   *relname;
+	char	   *relnamespace;
+	BlockNumber blkno;
+} vacuum_error_callback_arg;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -176,6 +182,7 @@ static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
 static int	vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
+static void vacuum_error_callback(void *arg);
 
 
 /*
@@ -607,6 +614,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -648,7 +657,16 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	next_unskippable_block = 0;
 	skipping_blocks = skip_blocks(onerel, params, 0, &next_unskippable_block,
 								  nblocks, &vmbuffer, aggressive);
 
+	/* Setup error traceback support for ereport() */
+	errcbarg.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	errcbarg.relname = relname;
+	errcbarg.blkno = InvalidBlockNumber; /* Not known yet */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &errcbarg;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -669,6 +687,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 #define FORCE_CHECK_PAGE() \
 		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 
+		errcbarg.blkno = blkno;
+
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		if (blkno == next_unskippable_block)
@@ -735,7 +755,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			}
 
+			/* Don't use the errcontext handler outside this function */
+			error_context_stack = errcallback.previous;
 			lazy_vacuum_index_and_heap(onerel, vacrelstats, Irel, nindexes,
 									   indstats);
+			error_context_stack = &errcallback;
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -1340,6 +1363,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			RecordPageWithFreeSpace(onerel, blkno, freespace);
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/* report that everything is scanned and vacuumed */
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -2316,3 +2342,15 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
 
 	return all_visible;
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	vacuum_error_callback_arg *cbarg = (vacuum_error_callback_arg *) arg;
+
+	errcontext("while scanning block %u of relation \"%s.%s\"",
+			   cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+}
-- 
2.7.4
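
The push/pop discipline matters because the callback struct lives in the
installing function's stack frame.  Here's a standalone mock of the
mechanism (the struct has the same shape as elog.h's ErrorContextCallback,
but none of this is backend code):

#include <stdio.h>

typedef struct ErrorContextCallback
{
	struct ErrorContextCallback *previous;
	void		(*callback) (void *arg);
	void	   *arg;
} ErrorContextCallback;

static ErrorContextCallback *error_context_stack = NULL;

/* mock error reporter: walk the stack to emit CONTEXT lines */
static void
report_error(const char *msg)
{
	ErrorContextCallback *c;

	fprintf(stderr, "ERROR:  %s\n", msg);
	for (c = error_context_stack; c != NULL; c = c->previous)
		c->callback(c->arg);
}

static void
block_context(void *arg)
{
	fprintf(stderr, "CONTEXT:  while scanning block %u\n",
			*(unsigned *) arg);
}

int
main(void)
{
	ErrorContextCallback cb;
	unsigned	blkno;

	cb.callback = block_context;
	cb.arg = &blkno;
	cb.previous = error_context_stack;
	error_context_stack = &cb;			/* push */

	for (blkno = 0; blkno < 3; blkno++)
	{
		if (blkno == 2)
			report_error("whoops");		/* reports the current block */
	}

	error_context_stack = cb.previous;	/* pop before cb's frame dies */
	return 0;
}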

From ca8114babf42ec5e1fa1a6a11cde44a0bb97be66 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:34:03 -0600
Subject: [PATCH v6 5/6] Add errcontext callback in lazy_vacuum_heap, too

---
 src/backend/access/heap/vacuumlazy.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 7d6282e..56dedf0 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1398,8 +1398,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	/* XXX put a threshold on min number of tuples here? */
 	if (vacrelstats->num_dead_tuples > 0)
 	{
 		lazy_vacuum_index_and_heap(onerel, vacrelstats, Irel, nindexes,
 								   indstats);
+		/* lazy_vacuum_heap() installed its own callback; reset the stack */
+		error_context_stack = errcallback.previous;
 	}
 
 	/*
@@ -1518,6 +1519,18 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
 
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
+
+	/* Setup error traceback support for ereport() */
+	errcbarg.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	errcbarg.relname = RelationGetRelationName(onerel);
+	errcbarg.blkno = InvalidBlockNumber; /* Not known yet */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &errcbarg;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	pg_rusage_init(&ru0);
 	npages = 0;
 
@@ -1532,6 +1545,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
+		errcbarg.blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
-- 
2.7.4
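
One wart to call out: lazy_vacuum_heap() installs errcallback from its own
stack frame but never pops it before returning, which is presumably why
lazy_scan_heap resets error_context_stack itself after the final call (the
first hunk above).  In terms of the mock from the previous sketch, the
failure mode looks like this (assumes those mock declarations):

/* assumes the mock ErrorContextCallback declarations from the last sketch */
static void
vacuum_heap_sketch(void)
{
	ErrorContextCallback cb;
	unsigned	blkno = 42;

	cb.callback = block_context;
	cb.arg = &blkno;
	cb.previous = error_context_stack;
	error_context_stack = &cb;
	/* ... work ... */

	/*
	 * Missing: error_context_stack = cb.previous;
	 * After return, error_context_stack points into this dead frame, and
	 * the next report_error() chases a dangling pointer unless every
	 * caller remembers to reset the stack itself.
	 */
}

A pop at the end of lazy_vacuum_heap() would make the caller-side reset
unnecessary.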

From e01538ecc8406155b1ced15016905ed137ceb957 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 15 Dec 2019 17:00:29 -0600
Subject: [PATCH v6 6/6] Print debug line before starting each vacuum step

---
 src/backend/access/heap/vacuumlazy.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 56dedf0..c020d08 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1532,6 +1532,10 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	error_context_stack = &errcallback;
 
 	pg_rusage_init(&ru0);
+
+	ereport(elevel, (errmsg("\"%s\": vacuuming heap",
+							RelationGetRelationName(onerel))));
+
 	npages = 0;
 
 	tupindex = 0;
@@ -1750,6 +1754,9 @@ lazy_vacuum_index(Relation indrel,
 
 	pg_rusage_init(&ru0);
 
+	ereport(elevel, (errmsg("\"%s\": vacuuming index",
+							RelationGetRelationName(indrel))));
+
 	ivinfo.index = indrel;
 	ivinfo.analyze_only = false;
 	ivinfo.report_progress = false;
@@ -1797,6 +1804,9 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
 	ivinfo.strategy = vac_strategy;
 
+	ereport(elevel, (errmsg("\"%s\": cleaning up index",
+							RelationGetRelationName(indrel))));
+
 	stats = index_vacuum_cleanup(&ivinfo, stats);
 
 	if (!stats)
-- 
2.7.4
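
For anyone wondering where these show up: elevel is set once in
heap_vacuum_rel() from the command options, so the new messages surface at
INFO under VACUUM VERBOSE and otherwise stay at DEBUG2.  Paraphrasing the
existing selection (the gist, not a verbatim excerpt):

	if (params->options & VACOPT_VERBOSE)
		elevel = INFO;			/* VACUUM VERBOSE prints the per-step lines */
	else
		elevel = DEBUG2;		/* visible only with elevated logging */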
