On Thu, Feb 27, 2020 at 09:09:42PM -0300, Alvaro Herrera wrote:
> > +           case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
> > +                   if (BlockNumberIsValid(cbarg->blkno))
> > +                           errcontext("while vacuuming block %u of relation \"%s.%s\"",
> > +                                           cbarg->blkno, cbarg->relnamespace, cbarg->relname);
> > +                   break;
> 
> I think you should still call errcontext() when blkno is invalid.

In my testing, the conditional at least avoids lots of CONTEXT noise from
interrupted autovacuum.  I couldn't easily reproduce that with the current
patch, though, maybe because it does less pushing and popping of the callback.
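
For comparison, reporting context unconditionally would presumably need a
second message for the invalid-blkno case, something like this (a sketch, not
what the attached patch does):

	case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
		if (BlockNumberIsValid(cbarg->blkno))
			errcontext("while vacuuming block %u of relation \"%s.%s\"",
					cbarg->blkno, cbarg->relnamespace, cbarg->relname);
		else
			errcontext("while vacuuming relation \"%s.%s\"",
					cbarg->relnamespace, cbarg->relname);
		break;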

> Maybe it would make sense to make the LVRelStats struct members be char
> arrays rather than pointers.  Then you memcpy() or strlcpy() them
> instead of palloc/free.

I had done that in the v15 patch, to allow passing it to parallel workers.
But I don't think it's really needed.
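
For reference, the v15 version looked roughly like this (a sketch with
NAMEDATALEN-sized buffers, not the exact v15 hunk):

	typedef struct LVRelStats
	{
		char		relnamespace[NAMEDATALEN];
		char		relname[NAMEDATALEN];
		...
	} LVRelStats;

	/* filled in with strlcpy() instead of pstrdup(): */
	strlcpy(vacrelstats->relname, RelationGetRelationName(onerel),
			sizeof(vacrelstats->relname));

That makes the struct safe to copy into shared memory wholesale, but since
each worker can look the names up itself, the pointers seem adequate.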

On Tue, Mar 03, 2020 at 10:05:42PM +0900, Masahiko Sawada wrote:
> I was concerned about fsm vacuum; vacuum error context might show heap
> scan while actually doing fsm vacuum. But perhaps we can update
> callback args for that. That would be helpful for user to distinguish
> that the problem seems to be either in heap vacuum or in fsm vacuum.

Done in the attached.  But I think exposing those additional phases through
(non-error) progress reporting is out of scope for this patch.

> On Fri, 21 Feb 2020 at 02:02, Alvaro Herrera <alvhe...@2ndquadrant.com> wrote:
> > parallel children will need to "init" on their own, right?
> Right. In that case, I think parallel vacuum worker needs to init the
> callback args at parallel_vacuum_main(). Other functions that parallel
> vacuum worker could call are also called by the leader process.

In the previous patch, I added this to vacuum_one_index().  But I noticed that
it sometimes reported multiple CONTEXT lines (while vacuuming .. while
scanning), which isn't intended.  I had hacked around that by setting
->previous = NULL, but your way, in parallel_vacuum_main(), seems better.
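
For the archives, the workaround looked roughly like this, in
vacuum_one_index() (a sketch):

	/* deliberately don't chain, to avoid a second CONTEXT line */
	errcallback.callback = vacuum_error_callback;
	errcallback.arg = vacrelstats;
	errcallback.previous = NULL;
	error_context_stack = &errcallback;

Setting up the callback once per worker avoids that kind of hack.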

-- 
Justin
From ca15c197328eb3feb851ec1c3b6ca7e0f1973e93 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v23 1/3] vacuum errcontext to show block being processed

Discussion:
https://www.postgresql.org/message-id/20191120210600.gc30...@telsasoft.com
---
 src/backend/access/heap/vacuumlazy.c | 178 +++++++++++++++++++++++----
 src/include/commands/progress.h      |   1 +
 2 files changed, 153 insertions(+), 26 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 03c43efc32..f15326a24c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -270,6 +270,8 @@ typedef struct LVParallelState
 
 typedef struct LVRelStats
 {
+	char	   *relnamespace;
+	char	   *relname;
 	/* useindex = true means two-pass strategy; false means one-pass */
 	bool		useindex;
 	/* Overall statistics about rel */
@@ -290,8 +292,12 @@ typedef struct LVRelStats
 	int			num_index_scans;
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
-} LVRelStats;
 
+	/* Used for error callback: */
+	char	   *indname;
+	BlockNumber blkno;	/* used only for heap operations */
+	int			phase;	/* Reusing same constants as for progress reporting */
+} LVRelStats;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -314,10 +320,10 @@ static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-							  LVDeadTuples *dead_tuples, double reltuples);
+							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
 static void lazy_cleanup_index(Relation indrel,
 							   IndexBulkDeleteResult **stats,
-							   double reltuples, bool estimated_count);
+							   double reltuples, bool estimated_count, LVRelStats *vacrelstats);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
@@ -337,13 +343,13 @@ static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult *
 										 int nindexes);
 static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVShared *lvshared, LVDeadTuples *dead_tuples,
-								  int nindexes);
+								  int nindexes, LVRelStats *vacrelstats);
 static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVRelStats *vacrelstats, LVParallelState *lps,
 								  int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-							 LVDeadTuples *dead_tuples);
+							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
 static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 									 LVRelStats *vacrelstats, LVParallelState *lps,
 									 int nindexes);
@@ -361,6 +367,9 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
 static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
+static void update_vacuum_error_cbarg(LVRelStats *errcbarg, int phase,
+		BlockNumber blkno, Relation indrel);
 
 
 /*
@@ -460,6 +469,9 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 
 	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
 
+	vacrelstats->relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats->relname = pstrdup(RelationGetRelationName(onerel));
+	vacrelstats->indname = NULL;
 	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
 	vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
 	vacrelstats->num_index_scans = 0;
@@ -699,7 +711,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	BlockNumber nblocks,
 				blkno;
 	HeapTupleData tuple;
-	char	   *relname;
 	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
 	TransactionId relminmxid = onerel->rd_rel->relminmxid;
 	BlockNumber empty_pages,
@@ -724,20 +735,20 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
 
 	pg_rusage_init(&ru0);
 
-	relname = RelationGetRelationName(onerel);
 	if (aggressive)
 		ereport(elevel,
 				(errmsg("aggressively vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
+						vacrelstats->relnamespace,
+						vacrelstats->relname)));
 	else
 		ereport(elevel,
 				(errmsg("vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
+						vacrelstats->relnamespace,
+						vacrelstats->relname)));
 
 	empty_pages = vacuumed_pages = 0;
 	next_fsm_block_to_vacuum = (BlockNumber) 0;
@@ -870,6 +881,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	else
 		skipping_blocks = false;
 
+	/* Setup error traceback support for ereport() */
+	update_vacuum_error_cbarg(vacrelstats, PROGRESS_VACUUM_PHASE_SCAN_HEAP,
+			InvalidBlockNumber, NULL);
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -891,6 +910,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 #define FORCE_CHECK_PAGE() \
 		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 
+		vacrelstats->blkno = blkno;
+
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		if (blkno == next_unskippable_block)
@@ -1005,12 +1026,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			 * Vacuum the Free Space Map to make newly-freed space visible on
 			 * upper-level FSM pages.  Note we have not yet processed blkno.
 			 */
+			update_vacuum_error_cbarg(vacrelstats,
+					PROGRESS_VACUUM_PHASE_VACUUM_FSM, InvalidBlockNumber, NULL);
 			FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
 			next_fsm_block_to_vacuum = blkno;
 
 			/* Report that we are once again scanning the heap */
 			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 										 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+
+			/* Set the error context while continuing heap scan */
+			update_vacuum_error_cbarg(vacrelstats,
+					PROGRESS_VACUUM_PHASE_SCAN_HEAP, blkno, NULL);
 		}
 
 		/*
@@ -1534,7 +1561,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
 		{
 			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats->relname, blkno);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
 								VISIBILITYMAP_VALID_BITS);
 		}
@@ -1555,7 +1582,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		else if (PageIsAllVisible(page) && has_dead_tuples)
 		{
 			elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats->relname, blkno);
 			PageClearAllVisible(page);
 			MarkBufferDirty(buf);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
@@ -1651,6 +1678,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (vacrelstats->useindex)
 		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
 	 * during parallel mode.
@@ -1744,7 +1774,7 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples);
+							  vacrelstats->old_live_tuples, vacrelstats);
 	}
 
 	/* Increase and report the number of index scans */
@@ -1778,8 +1808,13 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 								 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
 
 	pg_rusage_init(&ru0);
-	npages = 0;
 
+	/* Setup error traceback support for ereport() */
+	update_vacuum_error_cbarg(vacrelstats,
+			PROGRESS_VACUUM_PHASE_VACUUM_HEAP, InvalidBlockNumber,
+			NULL);
+
+	npages = 0;
 	tupindex = 0;
 	while (tupindex < vacrelstats->dead_tuples->num_tuples)
 	{
@@ -1791,6 +1826,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		vacrelstats->blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -2083,7 +2119,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	 * indexes in the case where no workers are launched.
 	 */
 	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes);
+						  vacrelstats->dead_tuples, nindexes, vacrelstats);
 
 	/* Wait for all vacuum workers to finish */
 	WaitForParallelWorkersToFinish(lps->pcxt);
@@ -2106,7 +2142,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 static void
 parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 					  LVShared *lvshared, LVDeadTuples *dead_tuples,
-					  int nindexes)
+					  int nindexes, LVRelStats *vacrelstats)
 {
 	/*
 	 * Increment the active worker count if we are able to launch any worker.
@@ -2140,7 +2176,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 
 		/* Do vacuum or cleanup of the index */
 		vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
-						 dead_tuples);
+						 dead_tuples, vacrelstats);
 	}
 
 	/*
@@ -2180,7 +2216,8 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
 			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
-							 shared_indstats, vacrelstats->dead_tuples);
+							 shared_indstats, vacrelstats->dead_tuples,
+							 vacrelstats);
 	}
 
 	/*
@@ -2200,7 +2237,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 static void
 vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 				 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-				 LVDeadTuples *dead_tuples)
+				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
 {
 	IndexBulkDeleteResult *bulkdelete_res = NULL;
 
@@ -2220,10 +2257,10 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 	/* Do vacuum or cleanup of the index */
 	if (lvshared->for_cleanup)
 		lazy_cleanup_index(indrel, stats, lvshared->reltuples,
-						   lvshared->estimated_count);
+						   lvshared->estimated_count, vacrelstats);
 	else
 		lazy_vacuum_index(indrel, stats, dead_tuples,
-						  lvshared->reltuples);
+						  lvshared->reltuples, vacrelstats);
 
 	/*
 	 * Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2298,7 +2335,8 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_cleanup_index(Irel[idx], &stats[idx],
 							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+							   vacrelstats->tupcount_pages < vacrelstats->rel_pages,
+							   vacrelstats);
 	}
 }
 
@@ -2313,7 +2351,7 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-				  LVDeadTuples *dead_tuples, double reltuples)
+				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
@@ -2329,6 +2367,11 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	update_vacuum_error_cbarg(vacrelstats,
+			PROGRESS_VACUUM_PHASE_VACUUM_INDEX, InvalidBlockNumber,
+			indrel);
+
 	/* Do bulk deletion */
 	*stats = index_bulk_delete(&ivinfo, *stats,
 							   lazy_tid_reaped, (void *) dead_tuples);
@@ -2354,7 +2397,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 static void
 lazy_cleanup_index(Relation indrel,
 				   IndexBulkDeleteResult **stats,
-				   double reltuples, bool estimated_count)
+				   double reltuples, bool estimated_count, LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
@@ -2371,6 +2414,10 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	update_vacuum_error_cbarg(vacrelstats,
+			PROGRESS_VACUUM_PHASE_INDEX_CLEANUP, InvalidBlockNumber, indrel);
+
 	*stats = index_vacuum_cleanup(&ivinfo, *stats);
 
 	if (!(*stats))
@@ -3320,6 +3367,8 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	int			nindexes;
 	char	   *sharedquery;
 	IndexBulkDeleteResult **stats;
+	LVRelStats vacrelstats;
+	ErrorContextCallback errcallback;
 
 	lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
 										   false);
@@ -3341,6 +3390,18 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	 */
 	onerel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
 
+	/* Init vacrelstats for use as error callback by parallel worker: */
+	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats.relname = pstrdup(RelationGetRelationName(onerel));
+	vacrelstats.indname = NULL;
+	vacrelstats.phase = -1;	/* Not yet processing */
+
+	/* Setup error traceback support for ereport() */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = &vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	/*
 	 * Open all indexes. indrels are sorted in order by OID, which should be
 	 * matched to the leader's one.
@@ -3370,9 +3431,74 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 		maintenance_work_mem = lvshared->maintenance_work_mem_worker;
 
 	/* Process indexes to perform vacuum/cleanup */
-	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes);
+	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes, &vacrelstats);
 
 	vac_close_indexes(nindexes, indrels, RowExclusiveLock);
 	table_close(onerel, ShareUpdateExclusiveLock);
 	pfree(stats);
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	LVRelStats *cbarg = arg;
+
+	switch (cbarg->phase) {
+		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while scanning block %u of relation \"%s.%s\"",
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while vacuuming block %u of relation \"%s.%s\"",
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_INDEX:
+			errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
+					cbarg->indname, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_INDEX_CLEANUP:
+			errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
+					cbarg->indname, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_FSM:
+			errcontext("while vacuuming free space map of relation \"%s.%s\"",
+					cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case -1: /* Parallel worker not yet processing */
+		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
+		default:
+			return; /* Shouldn't happen: do nothing */
+	}
+}
+
+/* Update vacuum error callback for current phase, block and index */
+static void
+update_vacuum_error_cbarg(LVRelStats *errcbarg, int phase, BlockNumber blkno,
+		Relation indrel)
+{
+	errcbarg->blkno = blkno;
+	errcbarg->phase = phase;
+
+	/* Free index name from any previous phase */
+	if (errcbarg->indname) {
+		pfree(errcbarg->indname);
+		errcbarg->indname = NULL;
+	}
+
+	/* For index phases, save the name of the current index for the callback */
+	if (indrel) {
+		Assert(indrel->rd_rel->relkind == RELKIND_INDEX);
+		errcbarg->indname = pstrdup(RelationGetRelationName(indrel));
+	}
+}
diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h
index 12e9d3d42f..5af2b8cc0c 100644
--- a/src/include/commands/progress.h
+++ b/src/include/commands/progress.h
@@ -33,6 +33,7 @@
 #define PROGRESS_VACUUM_PHASE_INDEX_CLEANUP		4
 #define PROGRESS_VACUUM_PHASE_TRUNCATE			5
 #define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP		6
+#define PROGRESS_VACUUM_PHASE_VACUUM_FSM		7 /* For error reporting only */
 
 /* Progress parameters for analyze */
 #define PROGRESS_ANALYZE_PHASE						0
-- 
2.17.0

From 6a435ea20193ba92f457a9a3c30d154cd0a74f5f Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 16 Feb 2020 20:25:13 -0600
Subject: [PATCH v23 2/3] add callback for truncation

---
 src/backend/access/heap/vacuumlazy.c | 21 ++++++++++++++++++++-
 src/include/commands/progress.h      |  1 +
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f15326a24c..d10a1bcc96 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2572,6 +2572,10 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 			return;
 		}
 
+		/* Setup error traceback support for ereport() */
+		update_vacuum_error_cbarg(vacrelstats,
+				PROGRESS_VACUUM_PHASE_TRUNCATE, new_rel_pages, NULL);
+
 		/*
 		 * Okay to truncate.
 		 */
@@ -2688,6 +2692,10 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 			prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
 			for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
 			{
+				/* Setup error traceback support for ereport() */
+				update_vacuum_error_cbarg(vacrelstats,
+						PROGRESS_VACUUM_PHASE_TRUNCATE_PREFETCH, pblkno, NULL);
+
 				PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
 				CHECK_FOR_INTERRUPTS();
 			}
@@ -3474,8 +3482,19 @@ vacuum_error_callback(void *arg)
 					cbarg->relnamespace, cbarg->relname);
 			break;
 
-		case -1: /* Parallel worker not yet processing */
 		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while truncating relation \"%s.%s\" to %u blocks",
+						cbarg->relnamespace, cbarg->relname, cbarg->blkno);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_TRUNCATE_PREFETCH:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while prefetching block %u to truncate relation \"%s.%s\"",
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case -1: /* Parallel worker not yet processing */
 		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
 		default:
 			return; /* Shouldn't happen: do nothing */
diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h
index 5af2b8cc0c..c7eeaf8c48 100644
--- a/src/include/commands/progress.h
+++ b/src/include/commands/progress.h
@@ -34,6 +34,7 @@
 #define PROGRESS_VACUUM_PHASE_TRUNCATE			5
 #define PROGRESS_VACUUM_PHASE_FINAL_CLEANUP		6
 #define PROGRESS_VACUUM_PHASE_VACUUM_FSM		7 /* For error reporting only */
+#define PROGRESS_VACUUM_PHASE_TRUNCATE_PREFETCH		8 /* For error reporting only */
 
 /* Progress parameters for analyze */
 #define PROGRESS_ANALYZE_PHASE						0
-- 
2.17.0

From 12fe4eebec05d72fd2bec67408be657d90bbdd75 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 26 Feb 2020 19:22:55 -0600
Subject: [PATCH v23 3/3] Avoid some calls to RelationGetRelationName

---
 src/backend/access/heap/vacuumlazy.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index d10a1bcc96..170b85f7b4 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -611,8 +611,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 			}
 			appendStringInfo(&buf, msgfmt,
 							 get_database_name(MyDatabaseId),
-							 get_namespace_name(RelationGetNamespace(onerel)),
-							 RelationGetRelationName(onerel),
+							 vacrelstats->relnamespace,
+							 vacrelstats->relname,
 							 vacrelstats->num_index_scans);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
 							 vacrelstats->pages_removed,
@@ -784,7 +784,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			if (params->nworkers > 0)
 				ereport(WARNING,
 						(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
-								RelationGetRelationName(onerel))));
+								vacrelstats->relname)));
 		}
 		else
 			lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
@@ -1695,7 +1695,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (vacuumed_pages)
 		ereport(elevel,
 				(errmsg("\"%s\": removed %.0f row versions in %u pages",
-						RelationGetRelationName(onerel),
+						vacrelstats->relname,
 						tups_vacuumed, vacuumed_pages)));
 
 	/*
@@ -1724,7 +1724,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 	ereport(elevel,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
-					RelationGetRelationName(onerel),
+					vacrelstats->relname,
 					tups_vacuumed, num_tuples,
 					vacrelstats->scanned_pages, nblocks),
 			 errdetail_internal("%s", buf.data)));
@@ -1855,7 +1855,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 
 	ereport(elevel,
 			(errmsg("\"%s\": removed %d row versions in %d pages",
-					RelationGetRelationName(onerel),
+					vacrelstats->relname,
 					tupindex, npages),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 }
@@ -2383,7 +2383,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	ereport(elevel,
 			(errmsg(msg,
-					RelationGetRelationName(indrel),
+					vacrelstats->indname,
 					dead_tuples->num_tuples),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 }
@@ -2531,7 +2531,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 				vacrelstats->lock_waiter_detected = true;
 				ereport(elevel,
 						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
-								RelationGetRelationName(onerel))));
+								vacrelstats->relname)));
 				return;
 			}
 
@@ -2600,7 +2600,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 
 		ereport(elevel,
 				(errmsg("\"%s\": truncated %u to %u pages",
-						RelationGetRelationName(onerel),
+						vacrelstats->relname,
 						old_rel_pages, new_rel_pages),
 				 errdetail_internal("%s",
 									pg_rusage_show(&ru0))));
@@ -2665,7 +2665,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 				{
 					ereport(elevel,
 							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
-									RelationGetRelationName(onerel))));
+									vacrelstats->relname)));
 
 					vacrelstats->lock_waiter_detected = true;
 					return blkno;
-- 
2.17.0
