On Thu, Mar 18, 2021 at 11:30:46PM +0900, Masahiko Sawada wrote:
> Sorry, I attached the wrong version patch. So attached the right one.

Thanks.  I have been hacking on that, and I think that we could do
more in terms of integrating the index stats into LVRelStats, mainly
to help with debugging issues, but also to open the door to letting
autovacuum use the parallel path in the future.  Hence, for
consistency, I think that we should change the following things in
LVRelStats:
- Add the number of indexes.  It looks rather unusual not to track
the number of indexes directly in the structure anyway, as the stats
array gets added there.
- Add all the index names, for parallel and non-parallel mode.
- Replace the index name in the error callback with an index number,
pointing back to its location in indstats and indnames (see the
extract below).
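
Put together, the new and changed parts of the structure would look
roughly like this (simplified extract from the attached patch):

	/* Statistics about indexes */
	int			nindexes;		/* number of indexes of the relation */
	char	  **indnames;		/* index names, for reports and errors */
	IndexBulkDeleteResult **indstats;	/* one entry per index */

	/* Used for error callback */
	int			indnum;			/* position in indnames and indstats,
								 * or -1 if no index is being worked on */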

As lazy_vacuum_index() needs to know its index number internally,
this means that we have to pass it down through
vacuum_indexes_leader() and lazy_parallel_vacuum_indexes(), but that
seems like an acceptable compromise to me for now.  I think that it
would be good to tighten the relationship a bit more between the
index stats in the DSM for the parallel case and the ones in local
memory, but what we have here looks like enough to me, so we could
figure that out as a future step.
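
For instance, lazy_vacuum_index() now just saves and restores the
index number around the work it does, instead of pstrdup()'ing and
freeing an index name each time (trimmed-down extract from the
patch):

	Assert(vacrelstats->indnum < 0);
	vacrelstats->indnum = indnum;
	update_vacuum_error_info(vacrelstats, &saved_err_info,
							 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
							 InvalidBlockNumber, InvalidOffsetNumber);

	/* [... bulk-delete call and log report ...] */

	/* Revert to the previous phase information for error traceback */
	restore_vacuum_error_info(vacrelstats, &saved_err_info);
	vacrelstats->indnum = -1;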

Sawada-san, what do you think?  Attached is the patch I have finished
with.
--
Michael
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 8341879d89..e7ba83f500 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -315,8 +315,14 @@ typedef struct LVRelStats
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
 
+	/* Statistics about indexes */
+	int			nindexes;
+	char	  **indnames;
+	IndexBulkDeleteResult **indstats;
+
 	/* Used for error callback */
-	char	   *indname;
+	int			indnum;			/* position in indnames and indstats,
+								 * or -1 if no index is being worked on. */
 	BlockNumber blkno;			/* used only for heap operations */
 	OffsetNumber offnum;		/* used only for heap operations */
 	VacErrPhase phase;
@@ -348,14 +354,18 @@ static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
 									LVRelStats *vacrelstats);
 static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
-									IndexBulkDeleteResult **stats,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
-static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
+static void lazy_vacuum_index(Relation indrel,
+							  IndexBulkDeleteResult **stats,
+							  LVDeadTuples *dead_tuples,
+							  double reltuples,
+							  LVRelStats *vacrelstats,
+							  int indnum);
 static void lazy_cleanup_index(Relation indrel,
 							   IndexBulkDeleteResult **stats,
-							   double reltuples, bool estimated_count, LVRelStats *vacrelstats);
+							   double reltuples, bool estimated_count,
+							   LVRelStats *vacrelstats, int indnum);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
@@ -371,21 +381,19 @@ static int	vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 LVRelStats *vacrelstats,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-										 LVRelStats *vacrelstats, LVParallelState *lps,
-										 int nindexes);
-static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVShared *lvshared, LVDeadTuples *dead_tuples,
-								  int nindexes, LVRelStats *vacrelstats);
-static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVRelStats *vacrelstats, LVParallelState *lps,
-								  int nindexes);
-static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
+static void lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+										 LVParallelState *lps, int nindexes);
+static void parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+								  LVDeadTuples *dead_tuples, int nindexes,
+								  LVRelStats *vacrelstats);
+static void vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+								  LVParallelState *lps, int nindexes);
+static void vacuum_one_index(Relation indrel,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
-static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-									 LVRelStats *vacrelstats, LVParallelState *lps,
-									 int nindexes);
+							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats,
+							 int indnum);
+static void lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+									 LVParallelState *lps, int nindexes);
 static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
 static int	compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
 											bool *can_parallel_vacuum);
@@ -433,6 +441,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 				write_rate;
 	bool		aggressive;		/* should we scan all unfrozen pages? */
 	bool		scanned_all_unfrozen;	/* actually scanned all such pages? */
+	int			i;
 	TransactionId xidFullScanLimit;
 	MultiXactId mxactFullScanLimit;
 	BlockNumber new_rel_pages;
@@ -499,7 +508,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 
 	vacrelstats->relnamespace = get_namespace_name(RelationGetNamespace(onerel));
 	vacrelstats->relname = pstrdup(RelationGetRelationName(onerel));
-	vacrelstats->indname = NULL;
+	vacrelstats->indnum = -1;
 	vacrelstats->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
 	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
 	vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
@@ -512,6 +521,19 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	vacrelstats->useindex = (nindexes > 0 &&
 							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
 
+	vacrelstats->nindexes = nindexes;
+	vacrelstats->indnames = NULL;
+	vacrelstats->indstats = (IndexBulkDeleteResult **)
+		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
+
+	/* Save index names for log reports and the error context */
+	if (vacrelstats->nindexes > 0)
+	{
+		vacrelstats->indnames = palloc(sizeof(char *) * nindexes);
+		for (i = 0; i < vacrelstats->nindexes; i++)
+			vacrelstats->indnames[i] = pstrdup(RelationGetRelationName(Irel[i]));
+	}
+
 	/*
 	 * Setup error traceback support for ereport().  The idea is to set up an
 	 * error context callback to display additional information on any error
@@ -680,6 +702,21 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 							 (long long) VacuumPageHit,
 							 (long long) VacuumPageMiss,
 							 (long long) VacuumPageDirty);
+			for (i = 0; i < nindexes; i++)
+			{
+				IndexBulkDeleteResult *stats = vacrelstats->indstats[i];
+
+				if (!stats)
+					continue;
+
+				appendStringInfo(&buf,
+								 _("index \"%s\": pages: %u remain, %u newly deleted, %u currently deleted, %u reusable\n"),
+								 vacrelstats->indnames[i],
+								 stats->num_pages,
+								 stats->pages_newly_deleted,
+								 stats->pages_deleted,
+								 stats->pages_free);
+			}
 			appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
 							 read_rate, write_rate);
 			if (track_io_timing)
@@ -705,6 +742,16 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 			pfree(buf.data);
 		}
 	}
+
+	/* Cleanup index statistics and index names */
+	for (i = 0; i < vacrelstats->nindexes; i++)
+	{
+		if (vacrelstats->indstats[i])
+			pfree(vacrelstats->indstats[i]);
+
+		if (vacrelstats->indnames && vacrelstats->indnames[i])
+			pfree(vacrelstats->indnames[i]);
+	}
 }
 
 /*
@@ -787,7 +834,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				tups_vacuumed,	/* tuples cleaned up by current vacuum */
 				nkeep,			/* dead-but-not-removable tuples */
 				nunused;		/* # existing unused line pointers */
-	IndexBulkDeleteResult **indstats;
 	int			i;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
@@ -820,9 +866,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	next_fsm_block_to_vacuum = (BlockNumber) 0;
 	num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
 
-	indstats = (IndexBulkDeleteResult **)
-		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
-
 	nblocks = RelationGetNumberOfBlocks(onerel);
 	vacrelstats->rel_pages = nblocks;
 	vacrelstats->scanned_pages = 0;
@@ -1070,8 +1113,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			}
 
 			/* Work on all the indexes, then the heap */
-			lazy_vacuum_all_indexes(onerel, Irel, indstats,
-									vacrelstats, lps, nindexes);
+			lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps,
+									nindexes);
 
 			/* Remove tuples from heap */
 			lazy_vacuum_heap(onerel, vacrelstats);
@@ -1728,8 +1771,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (dead_tuples->num_tuples > 0)
 	{
 		/* Work on all the indexes, and then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
-								lps, nindexes);
+		lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps,
+								nindexes);
 
 		/* Remove tuples from heap */
 		lazy_vacuum_heap(onerel, vacrelstats);
@@ -1747,18 +1790,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 	/* Do post-vacuum cleanup */
 	if (vacrelstats->useindex)
-		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
+		lazy_cleanup_all_indexes(Irel, vacrelstats, lps, nindexes);
 
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
 	 * during parallel mode.
 	 */
 	if (ParallelVacuumIsActive(lps))
-		end_parallel_vacuum(indstats, lps, nindexes);
+		end_parallel_vacuum(vacrelstats->indstats, lps, nindexes);
 
 	/* Update index statistics */
 	if (vacrelstats->useindex)
-		update_index_statistics(Irel, indstats, nindexes);
+		update_index_statistics(Irel, vacrelstats->indstats, nindexes);
 
 	/* If no indexes, make log report that lazy_vacuum_heap would've made */
 	if (vacuumed_pages)
@@ -1803,7 +1846,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
  */
 static void
 lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
-						IndexBulkDeleteResult **stats,
 						LVRelStats *vacrelstats, LVParallelState *lps,
 						int nindexes)
 {
@@ -1831,15 +1873,17 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
 		lps->lvshared->estimated_count = true;
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
 	}
 	else
 	{
 		int			idx;
 
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples, vacrelstats);
+			lazy_vacuum_index(Irel[idx], &(vacrelstats->indstats[idx]),
+							  vacrelstats->dead_tuples,
+							  vacrelstats->old_live_tuples, vacrelstats,
+							  idx);
 	}
 
 	/* Increase and report the number of index scans */
@@ -2109,9 +2153,8 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelStats *vacrelstats)
  * cleanup.
  */
 static void
-lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-							 LVRelStats *vacrelstats, LVParallelState *lps,
-							 int nindexes)
+lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+							 LVParallelState *lps, int nindexes)
 {
 	int			nworkers;
 
@@ -2199,14 +2242,14 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	}
 
 	/* Process the indexes that can be processed by only leader process */
-	vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
+	vacuum_indexes_leader(Irel, vacrelstats, lps, nindexes);
 
 	/*
 	 * Join as a parallel worker.  The leader process alone processes all the
 	 * indexes in the case where no workers are launched.
 	 */
-	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes, vacrelstats);
+	parallel_vacuum_index(Irel, lps->lvshared, vacrelstats->dead_tuples,
+						  nindexes, vacrelstats);
 
 	/*
 	 * Next, accumulate buffer and WAL usage.  (This must wait for the workers
@@ -2239,9 +2282,9 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  * vacuum worker processes to process the indexes in parallel.
  */
 static void
-parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVShared *lvshared, LVDeadTuples *dead_tuples,
-					  int nindexes, LVRelStats *vacrelstats)
+parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+					  LVDeadTuples *dead_tuples, int nindexes,
+					  LVRelStats *vacrelstats)
 {
 	/*
 	 * Increment the active worker count if we are able to launch any worker.
@@ -2274,8 +2317,9 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 			continue;
 
 		/* Do vacuum or cleanup of the index */
-		vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
-						 dead_tuples, vacrelstats);
+		vacuum_one_index(Irel[idx],
+						 lvshared, shared_indstats,
+						 dead_tuples, vacrelstats, idx);
 	}
 
 	/*
@@ -2291,9 +2335,8 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
  * because these indexes don't support parallel operation at that phase.
  */
 static void
-vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVRelStats *vacrelstats, LVParallelState *lps,
-					  int nindexes)
+vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+					  LVParallelState *lps, int nindexes)
 {
 	int			i;
 
@@ -2314,9 +2357,9 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		/* Process the indexes skipped by parallel workers */
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
-			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
-							 shared_indstats, vacrelstats->dead_tuples,
-							 vacrelstats);
+			vacuum_one_index(Irel[i],
+							 lps->lvshared, shared_indstats,
+							 vacrelstats->dead_tuples, vacrelstats, i);
 	}
 
 	/*
@@ -2334,12 +2377,20 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
  * segment.
  */
 static void
-vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
+vacuum_one_index(Relation indrel,
 				 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
+				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats,
+				 int indnum)
 {
 	IndexBulkDeleteResult *bulkdelete_res = NULL;
+	IndexBulkDeleteResult *stats = NULL;
 
+	/*
+	 * Get the statistics this routine is going to work on.  For a
+	 * parallel run, fetch this information from the shared index
+	 * stats.  If there is nothing of the kind, just grab the
+	 * equivalent from the stats stored in LVRelStats.
+	 */
 	if (shared_indstats)
 	{
 		/* Get the space for IndexBulkDeleteResult */
@@ -2349,17 +2400,20 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 		 * Update the pointer to the corresponding bulk-deletion result if
 		 * someone has already updated it.
 		 */
-		if (shared_indstats->updated && *stats == NULL)
-			*stats = bulkdelete_res;
+		if (shared_indstats->updated)
+			stats = bulkdelete_res;
 	}
+	else
+		stats = vacrelstats->indstats[indnum];
 
 	/* Do vacuum or cleanup of the index */
 	if (lvshared->for_cleanup)
-		lazy_cleanup_index(indrel, stats, lvshared->reltuples,
-						   lvshared->estimated_count, vacrelstats);
+		lazy_cleanup_index(indrel, &stats, lvshared->reltuples,
+						   lvshared->estimated_count, vacrelstats,
+						   indnum);
 	else
-		lazy_vacuum_index(indrel, stats, dead_tuples,
-						  lvshared->reltuples, vacrelstats);
+		lazy_vacuum_index(indrel, &stats, dead_tuples,
+						  lvshared->reltuples, vacrelstats, indnum);
 
 	/*
 	 * Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2373,17 +2427,11 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 	 * Since all vacuum workers write the bulk-deletion result at different
 	 * slots we can write them without locking.
 	 */
-	if (shared_indstats && !shared_indstats->updated && *stats != NULL)
+	if (shared_indstats && !shared_indstats->updated && stats != NULL)
 	{
-		memcpy(bulkdelete_res, *stats, sizeof(IndexBulkDeleteResult));
+		memcpy(bulkdelete_res, stats, sizeof(IndexBulkDeleteResult));
 		shared_indstats->updated = true;
-
-		/*
-		 * Now that stats[idx] points to the DSM segment, we don't need the
-		 * locally allocated results.
-		 */
-		pfree(*stats);
-		*stats = bulkdelete_res;
+		stats = bulkdelete_res;
 	}
 }
 
@@ -2394,9 +2442,8 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
  * parallel vacuum.
  */
 static void
-lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-						 LVRelStats *vacrelstats, LVParallelState *lps,
-						 int nindexes)
+lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+						 LVParallelState *lps, int nindexes)
 {
 	int			idx;
 
@@ -2427,15 +2474,15 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		lps->lvshared->estimated_count =
 			(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
 	}
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_cleanup_index(Irel[idx], &stats[idx],
+			lazy_cleanup_index(Irel[idx], &(vacrelstats->indstats[idx]),
 							   vacrelstats->new_rel_tuples,
 							   vacrelstats->tupcount_pages < vacrelstats->rel_pages,
-							   vacrelstats);
+							   vacrelstats, idx);
 	}
 }
 
@@ -2450,7 +2497,8 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
+				  LVDeadTuples *dead_tuples, double reltuples,
+				  LVRelStats *vacrelstats, int indnum)
 {
 	IndexVacuumInfo ivinfo;
 	PGRUsage	ru0;
@@ -2472,8 +2520,8 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	 * The index name is saved during this phase and restored immediately
 	 * after this phase.  See vacuum_error_callback.
 	 */
-	Assert(vacrelstats->indname == NULL);
-	vacrelstats->indname = pstrdup(RelationGetRelationName(indrel));
+	Assert(vacrelstats->indnum < 0);
+	vacrelstats->indnum = indnum;
 	update_vacuum_error_info(vacrelstats, &saved_err_info,
 							 VACUUM_ERRCB_PHASE_VACUUM_INDEX,
 							 InvalidBlockNumber, InvalidOffsetNumber);
@@ -2484,14 +2532,13 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	ereport(elevel,
 			(errmsg("scanned index \"%s\" to remove %d row versions",
-					vacrelstats->indname,
+					vacrelstats->indnames[indnum],
 					dead_tuples->num_tuples),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 
 	/* Revert to the previous phase information for error traceback */
 	restore_vacuum_error_info(vacrelstats, &saved_err_info);
-	pfree(vacrelstats->indname);
-	vacrelstats->indname = NULL;
+	vacrelstats->indnum = -1;
 }
 
 /*
@@ -2503,7 +2550,8 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 static void
 lazy_cleanup_index(Relation indrel,
 				   IndexBulkDeleteResult **stats,
-				   double reltuples, bool estimated_count, LVRelStats *vacrelstats)
+				   double reltuples, bool estimated_count,
+				   LVRelStats *vacrelstats, int indnum)
 {
 	IndexVacuumInfo ivinfo;
 	PGRUsage	ru0;
@@ -2526,8 +2574,8 @@ lazy_cleanup_index(Relation indrel,
 	 * The index name is saved during this phase and restored immediately
 	 * after this phase.  See vacuum_error_callback.
 	 */
-	Assert(vacrelstats->indname == NULL);
-	vacrelstats->indname = pstrdup(RelationGetRelationName(indrel));
+	Assert(vacrelstats->indnum < 0);
+	vacrelstats->indnum = indnum;
 	update_vacuum_error_info(vacrelstats, &saved_err_info,
 							 VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
 							 InvalidBlockNumber, InvalidOffsetNumber);
@@ -2553,8 +2601,7 @@ lazy_cleanup_index(Relation indrel,
 
 	/* Revert to the previous phase information for error traceback */
 	restore_vacuum_error_info(vacrelstats, &saved_err_info);
-	pfree(vacrelstats->indname);
-	vacrelstats->indname = NULL;
+	vacrelstats->indnum = -1;
 }
 
 /*
@@ -3243,7 +3290,6 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
 							InvalidTransactionId,
 							InvalidMultiXactId,
 							false);
-		pfree(stats[i]);
 	}
 }
 
@@ -3550,7 +3596,6 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	WalUsage   *wal_usage;
 	int			nindexes;
 	char	   *sharedquery;
-	IndexBulkDeleteResult **stats;
 	LVRelStats	vacrelstats;
 	ErrorContextCallback errcallback;
 
@@ -3597,9 +3642,6 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	VacuumSharedCostBalance = &(lvshared->cost_balance);
 	VacuumActiveNWorkers = &(lvshared->active_nworkers);
 
-	stats = (IndexBulkDeleteResult **)
-		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
-
 	if (lvshared->maintenance_work_mem_worker > 0)
 		maintenance_work_mem = lvshared->maintenance_work_mem_worker;
 
@@ -3609,9 +3651,22 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	 */
 	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
 	vacrelstats.relname = pstrdup(RelationGetRelationName(onerel));
-	vacrelstats.indname = NULL;
+	vacrelstats.indnum = -1;
 	vacrelstats.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
 
+	vacrelstats.nindexes = nindexes;
+	vacrelstats.indnames = NULL;
+	vacrelstats.indstats = (IndexBulkDeleteResult **)
+		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
+
+	/* Save index names for the error context */
+	if (vacrelstats.nindexes > 0)
+	{
+		vacrelstats.indnames = palloc(sizeof(char *) * nindexes);
+		for (int i = 0; i < vacrelstats.nindexes; i++)
+			vacrelstats.indnames[i] = pstrdup(RelationGetRelationName(indrels[i]));
+	}
+
 	/* Setup error traceback support for ereport() */
 	errcallback.callback = vacuum_error_callback;
 	errcallback.arg = &vacrelstats;
@@ -3622,7 +3677,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	InstrStartParallelQuery();
 
 	/* Process indexes to perform vacuum/cleanup */
-	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
+	parallel_vacuum_index(indrels, lvshared, dead_tuples, nindexes,
 						  &vacrelstats);
 
 	/* Report buffer/WAL usage during parallel execution */
@@ -3636,7 +3691,16 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 
 	vac_close_indexes(nindexes, indrels, RowExclusiveLock);
 	table_close(onerel, ShareUpdateExclusiveLock);
-	pfree(stats);
+
+	/* Cleanup index statistics and index names */
+	for (int i = 0; i < vacrelstats.nindexes; i++)
+	{
+		if (vacrelstats.indstats[i])
+			pfree(vacrelstats.indstats[i]);
+
+		if (vacrelstats.indnames && vacrelstats.indnames[i])
+			pfree(vacrelstats.indnames[i]);
+	}
 }
 
 /*
@@ -3681,12 +3745,14 @@ vacuum_error_callback(void *arg)
 
 		case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
 			errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
-					   errinfo->indname, errinfo->relnamespace, errinfo->relname);
+					   errinfo->indnames[errinfo->indnum],
+					   errinfo->relnamespace, errinfo->relname);
 			break;
 
 		case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
 			errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
-					   errinfo->indname, errinfo->relnamespace, errinfo->relname);
+					   errinfo->indnames[errinfo->indnum],
+					   errinfo->relnamespace, errinfo->relname);
 			break;
 
 		case VACUUM_ERRCB_PHASE_TRUNCATE:
