On Sun, Jan 26, 2020 at 12:29:38PM -0800, Andres Freund wrote:
> > postgres=# SET client_min_messages=debug;SET statement_timeout=99; VACUUM (VERBOSE, PARALLEL 0) t;
> > INFO:  vacuuming "public.t"
> > DEBUG:  "t_a_idx": vacuuming index
> > 2020-01-20 15:47:36.338 CST [20139] ERROR:  canceling statement due to statement timeout
> > 2020-01-20 15:47:36.338 CST [20139] CONTEXT:  while vacuuming relation "public.t_a_idx"
> > 2020-01-20 15:47:36.338 CST [20139] STATEMENT:  VACUUM (VERBOSE, PARALLEL 0) t;
> > ERROR:  canceling statement due to statement timeout
> > CONTEXT:  while vacuuming relation "public.t_a_idx"
> 
> It'd be a bit nicer if it said index "public.t_a_idx" for relation "public.t".

I think that tips the scale in favour of making vacrelstats a global.
I added that as a first patch, and squashed the callback patches into one.
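
The rough shape (the patches below have the real thing) is the usual elog.h
error-context pattern, with the state in a file-scope struct so the callback
can reach it without threading a pointer through every function:

	static LVRelStats vacrelstats;	/* file scope, visible to the callback */

	ErrorContextCallback errcallback;

	errcallback.callback = vacuum_error_callback;
	errcallback.arg = (void *) &vacrelstats;
	errcallback.previous = error_context_stack;
	error_context_stack = &errcallback;
	/* ... heap scan; any ereport() here gets our CONTEXT line ... */
	error_context_stack = errcallback.previous;	/* pop when done */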

Also, it seems to me we shouldn't repeat the namespace of both the index
*and* its table.  I went looking for existing precedent:

grep -r '\\"%s.%s\\"' --incl='*.c' |grep '\\"%s\\"'
src/backend/commands/cluster.c:	(errmsg("clustering \"%s.%s\" using index scan on \"%s\"",
src/backend/access/heap/vacuumlazy.c:	errcontext(_("while vacuuming index \"%s\" on table \"%s.%s\""),

grep -r 'index \\".* table \\"' --incl='*.c'
src/backend/catalog/index.c:	(errmsg("building index \"%s\" on table \"%s\" serially",
src/backend/catalog/index.c:	(errmsg_plural("building index \"%s\" on table \"%s\" with request for %d parallel worker",
src/backend/catalog/index.c:	"building index \"%s\" on table \"%s\" with request for %d parallel workers",
src/backend/catalog/catalog.c:	errmsg("index \"%s\" does not belong to table \"%s\"",
src/backend/commands/indexcmds.c:	(errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
src/backend/commands/tablecmds.c:	errmsg("index \"%s\" for table \"%s\" does not exist",
src/backend/commands/tablecmds.c:	errmsg("index \"%s\" for table \"%s\" does not exist",
src/backend/commands/tablecmds.c:	errdetail("The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\".",
src/backend/commands/cluster.c:	errmsg("index \"%s\" for table \"%s\" does not exist",
src/backend/parser/parse_utilcmd.c:	errmsg("index \"%s\" does not belong to table \"%s\"",
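
So the existing precedent qualifies the table but not the index, and the
callback follows it; the message ends up as (same identifiers as in the
second patch):

	errcontext(_("while vacuuming index \"%s\" on table \"%s.%s\""),
			   cbarg->indname, cbarg->relnamespace, cbarg->relname);
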
From 8ee9ffc1325118438309ee25e9b33c61cccd022f Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 26 Jan 2020 22:38:10 -0600
Subject: [PATCH v14 1/3] Make vacrelstats a global

---
 src/backend/access/heap/vacuumlazy.c | 276 +++++++++++++++++------------------
 1 file changed, 136 insertions(+), 140 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 8ce5011..114428b 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -302,16 +302,17 @@ static MultiXactId MultiXactCutoff;
 
 static BufferAccessStrategy vac_strategy;
 
+static LVRelStats vacrelstats;
 
 /* non-export function prototypes */
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
-						   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+						   Relation *Irel, int nindexes,
 						   bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static void lazy_vacuum_heap(Relation onerel);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									IndexBulkDeleteResult **stats,
-									LVRelStats *vacrelstats, LVParallelState *lps,
+									LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 							  LVDeadTuples *dead_tuples, double reltuples);
@@ -319,13 +320,11 @@ static void lazy_cleanup_index(Relation indrel,
 							   IndexBulkDeleteResult **stats,
 							   double reltuples, bool estimated_count);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
-							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
-static bool should_attempt_truncation(VacuumParams *params,
-									  LVRelStats *vacrelstats);
-static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
-static BlockNumber count_nondeletable_pages(Relation onerel,
-											LVRelStats *vacrelstats);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
+							 int tupindex, Buffer *vmbuffer);
+static bool should_attempt_truncation(VacuumParams *params);
+static void lazy_truncate_heap(Relation onerel);
+static BlockNumber count_nondeletable_pages(Relation onerel);
+static void lazy_space_alloc(BlockNumber relblocks);
 static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples,
 								   ItemPointer itemptr);
 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
@@ -333,19 +332,19 @@ static int	vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
 static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-										 LVRelStats *vacrelstats, LVParallelState *lps,
+										 LVParallelState *lps,
 										 int nindexes);
 static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVShared *lvshared, LVDeadTuples *dead_tuples,
 								  int nindexes);
 static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVRelStats *vacrelstats, LVParallelState *lps,
+								  LVParallelState *lps,
 								  int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
 							 LVDeadTuples *dead_tuples);
 static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-									 LVRelStats *vacrelstats, LVParallelState *lps,
+									 LVParallelState *lps,
 									 int nindexes);
 static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
 static int	compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
@@ -355,7 +354,7 @@ static void prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacu
 static void update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
 									int nindexes);
 static LVParallelState *begin_parallel_vacuum(Oid relid, Relation *Irel,
-											  LVRelStats *vacrelstats, BlockNumber nblocks,
+											  BlockNumber nblocks,
 											  int nindexes, int nrequested);
 static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
@@ -376,7 +375,6 @@ void
 heap_vacuum_rel(Relation onerel, VacuumParams *params,
 				BufferAccessStrategy bstrategy)
 {
-	LVRelStats *vacrelstats;
 	Relation   *Irel;
 	int			nindexes;
 	PGRUsage	ru0;
@@ -458,21 +456,20 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 		return;
 	}
 
-	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
-
-	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
-	vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
-	vacrelstats->num_index_scans = 0;
-	vacrelstats->pages_removed = 0;
-	vacrelstats->lock_waiter_detected = false;
+	memset(&vacrelstats, 0, sizeof(vacrelstats));
+	vacrelstats.old_rel_pages = onerel->rd_rel->relpages;
+	vacrelstats.old_live_tuples = onerel->rd_rel->reltuples;
+	/*
+	 * The memset zeroes the rest (num_index_scans, pages_removed, etc).
+	 */
 
 	/* Open all indexes of the relation */
 	vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
-	vacrelstats->useindex = (nindexes > 0 &&
+	vacrelstats.useindex = (nindexes > 0 &&
 							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
 
 	/* Do the vacuuming */
-	lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
+	lazy_scan_heap(onerel, params, Irel, nindexes, aggressive);
 
 	/* Done with indexes */
 	vac_close_indexes(nindexes, Irel, NoLock);
@@ -484,8 +481,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	 * NB: We need to check this before truncating the relation, because that
 	 * will change ->rel_pages.
 	 */
-	if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
-		< vacrelstats->rel_pages)
+	if ((vacrelstats.scanned_pages + vacrelstats.frozenskipped_pages)
+		< vacrelstats.rel_pages)
 	{
 		Assert(!aggressive);
 		scanned_all_unfrozen = false;
@@ -496,8 +493,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	/*
 	 * Optionally truncate the relation.
 	 */
-	if (should_attempt_truncation(params, vacrelstats))
-		lazy_truncate_heap(onerel, vacrelstats);
+	if (should_attempt_truncation(params))
+		lazy_truncate_heap(onerel);
 
 	/* Report that we are now doing final cleanup */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
@@ -524,12 +521,12 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
 	 * since then we don't know for certain that all tuples have a newer xmin.
 	 */
-	new_rel_pages = vacrelstats->rel_pages;
-	new_live_tuples = vacrelstats->new_live_tuples;
-	if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
+	new_rel_pages = vacrelstats.rel_pages;
+	new_live_tuples = vacrelstats.new_live_tuples;
+	if (vacrelstats.tupcount_pages == 0 && new_rel_pages > 0)
 	{
-		new_rel_pages = vacrelstats->old_rel_pages;
-		new_live_tuples = vacrelstats->old_live_tuples;
+		new_rel_pages = vacrelstats.old_rel_pages;
+		new_live_tuples = vacrelstats.old_live_tuples;
 	}
 
 	visibilitymap_count(onerel, &new_rel_allvisible, NULL);
@@ -552,7 +549,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	pgstat_report_vacuum(RelationGetRelid(onerel),
 						 onerel->rd_rel->relisshared,
 						 new_live_tuples,
-						 vacrelstats->new_dead_tuples);
+						 vacrelstats.new_dead_tuples);
 	pgstat_progress_end_command();
 
 	/* and log the action if appropriate */
@@ -601,17 +598,17 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 							 get_database_name(MyDatabaseId),
 							 get_namespace_name(RelationGetNamespace(onerel)),
 							 RelationGetRelationName(onerel),
-							 vacrelstats->num_index_scans);
+							 vacrelstats.num_index_scans);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
-							 vacrelstats->pages_removed,
-							 vacrelstats->rel_pages,
-							 vacrelstats->pinskipped_pages,
-							 vacrelstats->frozenskipped_pages);
+							 vacrelstats.pages_removed,
+							 vacrelstats.rel_pages,
+							 vacrelstats.pinskipped_pages,
+							 vacrelstats.frozenskipped_pages);
 			appendStringInfo(&buf,
 							 _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
-							 vacrelstats->tuples_deleted,
-							 vacrelstats->new_rel_tuples,
-							 vacrelstats->new_dead_tuples,
+							 vacrelstats.tuples_deleted,
+							 vacrelstats.new_rel_tuples,
+							 vacrelstats.new_dead_tuples,
 							 OldestXmin);
 			appendStringInfo(&buf,
 							 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
@@ -646,7 +643,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
  * which would be after the rows have become inaccessible.
  */
 static void
-vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
+vacuum_log_cleanup_info(Relation rel)
 {
 	/*
 	 * Skip this for relations for which no WAL is to be written, or if we're
@@ -658,8 +655,8 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 	/*
 	 * No need to write the record at all unless it contains a valid value
 	 */
-	if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
-		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
+	if (TransactionIdIsValid(vacrelstats.latestRemovedXid))
+		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats.latestRemovedXid);
 }
 
 /*
@@ -691,7 +688,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
  *		reference them have been killed.
  */
 static void
-lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
+lazy_scan_heap(Relation onerel, VacuumParams *params,
 			   Relation *Irel, int nindexes, bool aggressive)
 {
 	LVParallelState *lps = NULL;
@@ -747,18 +744,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
 
 	nblocks = RelationGetNumberOfBlocks(onerel);
-	vacrelstats->rel_pages = nblocks;
-	vacrelstats->scanned_pages = 0;
-	vacrelstats->tupcount_pages = 0;
-	vacrelstats->nonempty_pages = 0;
-	vacrelstats->latestRemovedXid = InvalidTransactionId;
+	vacrelstats.rel_pages = nblocks;
+	vacrelstats.scanned_pages = 0;
+	vacrelstats.tupcount_pages = 0;
+	vacrelstats.nonempty_pages = 0;
+	vacrelstats.latestRemovedXid = InvalidTransactionId;
 
 	/*
 	 * Initialize the state for a parallel vacuum.  As of now, only one worker
 	 * can be used for an index, so we invoke parallelism only if there are at
 	 * least two indexes on a table.
 	 */
-	if (params->nworkers >= 0 && vacrelstats->useindex && nindexes > 1)
+	if (params->nworkers >= 0 && vacrelstats.useindex && nindexes > 1)
 	{
 		/*
 		 * Since parallel workers cannot access data in temporary tables, we
@@ -777,7 +774,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		}
 		else
 			lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
-										vacrelstats, nblocks, nindexes,
+										nblocks, nindexes,
 										params->nworkers);
 	}
 
@@ -786,9 +783,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	 * initialized.
 	 */
 	if (!ParallelVacuumIsActive(lps))
-		lazy_space_alloc(vacrelstats, nblocks);
+		lazy_space_alloc(nblocks);
 
-	dead_tuples = vacrelstats->dead_tuples;
+	dead_tuples = vacrelstats.dead_tuples;
 	frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
 
 	/* Report that we're scanning the heap, advertising total # of blocks */
@@ -889,7 +886,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 		/* see note above about forcing scanning of last page */
 #define FORCE_CHECK_PAGE() \
-		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
+		(blkno == nblocks - 1 && should_attempt_truncation(params))
 
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -960,7 +957,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * in this case an approximate answer is OK.
 				 */
 				if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
-					vacrelstats->frozenskipped_pages++;
+					vacrelstats.frozenskipped_pages++;
 				continue;
 			}
 			all_visible_according_to_vm = true;
@@ -989,10 +986,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 			/* Work on all the indexes, then the heap */
 			lazy_vacuum_all_indexes(onerel, Irel, indstats,
-									vacrelstats, lps, nindexes);
-
+									lps, nindexes);
 			/* Remove tuples from heap */
-			lazy_vacuum_heap(onerel, vacrelstats);
+			lazy_vacuum_heap(onerel);
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -1039,7 +1035,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			if (!aggressive && !FORCE_CHECK_PAGE())
 			{
 				ReleaseBuffer(buf);
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.pinskipped_pages++;
 				continue;
 			}
 
@@ -1063,10 +1059,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			if (!lazy_check_needs_freeze(buf, &hastup))
 			{
 				UnlockReleaseBuffer(buf);
-				vacrelstats->scanned_pages++;
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.scanned_pages++;
+				vacrelstats.pinskipped_pages++;
 				if (hastup)
-					vacrelstats->nonempty_pages = blkno + 1;
+					vacrelstats.nonempty_pages = blkno + 1;
 				continue;
 			}
 			if (!aggressive)
@@ -1076,9 +1072,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * to claiming that the page contains no freezable tuples.
 				 */
 				UnlockReleaseBuffer(buf);
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.pinskipped_pages++;
 				if (hastup)
-					vacrelstats->nonempty_pages = blkno + 1;
+					vacrelstats.nonempty_pages = blkno + 1;
 				continue;
 			}
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
@@ -1086,8 +1082,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			/* drop through to normal processing */
 		}
 
-		vacrelstats->scanned_pages++;
-		vacrelstats->tupcount_pages++;
+		vacrelstats.scanned_pages++;
+		vacrelstats.tupcount_pages++;
 
 		page = BufferGetPage(buf);
 
@@ -1184,7 +1180,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 * We count tuples removed by the pruning step as removed by VACUUM.
 		 */
 		tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
-										 &vacrelstats->latestRemovedXid);
+										 &vacrelstats.latestRemovedXid);
 
 		/*
 		 * Now scan the page to collect vacuumable items and check for tuples
@@ -1381,7 +1377,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			{
 				lazy_record_dead_tuple(dead_tuples, &(tuple.t_self));
 				HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
-													   &vacrelstats->latestRemovedXid);
+													   &vacrelstats.latestRemovedXid);
 				tups_vacuumed += 1;
 				has_dead_tuples = true;
 			}
@@ -1449,12 +1445,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 * doing a second scan. Also we don't do that but forget dead tuples
 		 * when index cleanup is disabled.
 		 */
-		if (!vacrelstats->useindex && dead_tuples->num_tuples > 0)
+		if (!vacrelstats.useindex && dead_tuples->num_tuples > 0)
 		{
 			if (nindexes == 0)
 			{
 				/* Remove tuples from heap if the table has no index */
-				lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
+				lazy_vacuum_page(onerel, blkno, buf, 0, &vmbuffer);
 				vacuumed_pages++;
 				has_dead_tuples = false;
 			}
@@ -1465,7 +1461,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * Instead of vacuuming the dead tuples on the heap, we just
 				 * forget them.
 				 *
-				 * Note that vacrelstats->dead_tuples could have tuples which
+				 * Note that vacrelstats.dead_tuples could have tuples which
 				 * became dead after HOT-pruning but are not marked dead yet.
 				 * We do not process them because it's a very rare condition,
 				 * and the next vacuum will process them anyway.
@@ -1584,7 +1580,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 		/* Remember the location of the last page with nonremovable tuples */
 		if (hastup)
-			vacrelstats->nonempty_pages = blkno + 1;
+			vacrelstats.nonempty_pages = blkno + 1;
 
 		/*
 		 * If we remembered any tuples for deletion, then the page will be
@@ -1603,18 +1599,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pfree(frozen);
 
 	/* save stats for use later */
-	vacrelstats->tuples_deleted = tups_vacuumed;
-	vacrelstats->new_dead_tuples = nkeep;
+	vacrelstats.tuples_deleted = tups_vacuumed;
+	vacrelstats.new_dead_tuples = nkeep;
 
 	/* now we can compute the new value for pg_class.reltuples */
-	vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
+	vacrelstats.new_live_tuples = vac_estimate_reltuples(onerel,
 														  nblocks,
-														  vacrelstats->tupcount_pages,
+														  vacrelstats.tupcount_pages,
 														  live_tuples);
 
 	/* also compute total number of surviving heap entries */
-	vacrelstats->new_rel_tuples =
-		vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples;
+	vacrelstats.new_rel_tuples =
+		vacrelstats.new_live_tuples + vacrelstats.new_dead_tuples;
 
 	/*
 	 * Release any remaining pin on visibility map page.
@@ -1630,11 +1626,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (dead_tuples->num_tuples > 0)
 	{
 		/* Work on all the indexes, and then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
+		lazy_vacuum_all_indexes(onerel, Irel, indstats,
 								lps, nindexes);
 
 		/* Remove tuples from heap */
-		lazy_vacuum_heap(onerel, vacrelstats);
+		lazy_vacuum_heap(onerel);
 	}
 
 	/*
@@ -1648,8 +1644,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
 	/* Do post-vacuum cleanup */
-	if (vacrelstats->useindex)
-		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
+	if (vacrelstats.useindex)
+		lazy_cleanup_all_indexes(Irel, indstats, lps, nindexes);
 
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
@@ -1680,12 +1676,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 					 nunused);
 	appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
 									"Skipped %u pages due to buffer pins, ",
-									vacrelstats->pinskipped_pages),
-					 vacrelstats->pinskipped_pages);
+									vacrelstats.pinskipped_pages),
+					 vacrelstats.pinskipped_pages);
 	appendStringInfo(&buf, ngettext("%u frozen page.\n",
 									"%u frozen pages.\n",
-									vacrelstats->frozenskipped_pages),
-					 vacrelstats->frozenskipped_pages);
+									vacrelstats.frozenskipped_pages),
+					 vacrelstats.frozenskipped_pages);
 	appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
 									"%u pages are entirely empty.\n",
 									empty_pages),
@@ -1696,7 +1692,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
 					RelationGetRelationName(onerel),
 					tups_vacuumed, num_tuples,
-					vacrelstats->scanned_pages, nblocks),
+					vacrelstats.scanned_pages, nblocks),
 			 errdetail_internal("%s", buf.data)));
 	pfree(buf.data);
 }
@@ -1709,14 +1705,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 static void
 lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 						IndexBulkDeleteResult **stats,
-						LVRelStats *vacrelstats, LVParallelState *lps,
+						LVParallelState *lps,
 						int nindexes)
 {
 	Assert(!IsParallelWorker());
 	Assert(nindexes > 0);
 
 	/* Log cleanup info before we touch indexes */
-	vacuum_log_cleanup_info(onerel, vacrelstats);
+	vacuum_log_cleanup_info(onerel);
 
 	/* Report that we are now vacuuming indexes */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
@@ -1733,24 +1729,24 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 		 * We can only provide an approximate value of num_heap_tuples in
 		 * vacuum cases.
 		 */
-		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
+		lps->lvshared->reltuples = vacrelstats.old_live_tuples;
 		lps->lvshared->estimated_count = true;
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, stats, lps, nindexes);
 	}
 	else
 	{
 		int			idx;
 
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples);
+			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats.dead_tuples,
+							  vacrelstats.old_live_tuples);
 	}
 
 	/* Increase and report the number of index scans */
-	vacrelstats->num_index_scans++;
+	vacrelstats.num_index_scans++;
 	pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
-								 vacrelstats->num_index_scans);
+								 vacrelstats.num_index_scans);
 }
 
 
@@ -1766,7 +1762,7 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
  * process index entry removal in batches as large as possible.
  */
 static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_vacuum_heap(Relation onerel)
 {
 	int			tupindex;
 	int			npages;
@@ -1781,7 +1777,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	npages = 0;
 
 	tupindex = 0;
-	while (tupindex < vacrelstats->dead_tuples->num_tuples)
+	while (tupindex < vacrelstats.dead_tuples->num_tuples)
 	{
 		BlockNumber tblk;
 		Buffer		buf;
@@ -1790,7 +1786,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 
 		vacuum_delay_point();
 
-		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		tblk = ItemPointerGetBlockNumber(&vacrelstats.dead_tuples->itemptrs[tupindex]);
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1799,7 +1795,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 			++tupindex;
 			continue;
 		}
-		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
+		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex,
 									&vmbuffer);
 
 		/* Now that we've compacted the page, record its available space */
@@ -1836,9 +1832,9 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
  */
 static int
 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
-				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
+				 int tupindex, Buffer *vmbuffer)
 {
-	LVDeadTuples *dead_tuples = vacrelstats->dead_tuples;
+	LVDeadTuples *dead_tuples = vacrelstats.dead_tuples;
 	Page		page = BufferGetPage(buffer);
 	OffsetNumber unused[MaxOffsetNumber];
 	int			uncnt = 0;
@@ -1879,7 +1875,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 		recptr = log_heap_clean(onerel, buffer,
 								NULL, 0, NULL, 0,
 								unused, uncnt,
-								vacrelstats->latestRemovedXid);
+								vacrelstats.latestRemovedXid);
 		PageSetLSN(page, recptr);
 	}
 
@@ -1987,7 +1983,7 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup)
  */
 static void
 lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-							 LVRelStats *vacrelstats, LVParallelState *lps,
+							 LVParallelState *lps,
 							 int nindexes)
 {
 	int			nworkers;
@@ -2021,7 +2017,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	/* Setup the shared cost-based vacuum delay and launch workers */
 	if (nworkers > 0)
 	{
-		if (vacrelstats->num_index_scans > 0)
+		if (vacrelstats.num_index_scans > 0)
 		{
 			/* Reset the parallel index processing counter */
 			pg_atomic_write_u32(&(lps->lvshared->idx), 0);
@@ -2076,14 +2072,14 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	}
 
 	/* Process the indexes that can be processed by only leader process */
-	vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
+	vacuum_indexes_leader(Irel, stats, lps, nindexes);
 
 	/*
 	 * Join as a parallel worker.  The leader process alone processes all the
 	 * indexes in the case where no workers are launched.
 	 */
 	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes);
+						  vacrelstats.dead_tuples, nindexes);
 
 	/* Wait for all vacuum workers to finish */
 	WaitForParallelWorkersToFinish(lps->pcxt);
@@ -2157,7 +2153,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVRelStats *vacrelstats, LVParallelState *lps,
+					  LVParallelState *lps,
 					  int nindexes)
 {
 	int			i;
@@ -2180,7 +2176,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
 			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
-							 shared_indstats, vacrelstats->dead_tuples);
+							 shared_indstats, vacrelstats.dead_tuples);
 	}
 
 	/*
@@ -2259,7 +2255,7 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-						 LVRelStats *vacrelstats, LVParallelState *lps,
+						 LVParallelState *lps,
 						 int nindexes)
 {
 	int			idx;
@@ -2280,25 +2276,25 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		/* Tell parallel workers to do index cleanup */
 		lps->lvshared->for_cleanup = true;
 		lps->lvshared->first_time =
-			(vacrelstats->num_index_scans == 0);
+			(vacrelstats.num_index_scans == 0);
 
 		/*
 		 * Now we can provide a better estimate of total number of surviving
 		 * tuples (we assume indexes are more interested in that than in the
 		 * number of nominally live tuples).
 		 */
-		lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
+		lps->lvshared->reltuples = vacrelstats.new_rel_tuples;
 		lps->lvshared->estimated_count =
-			(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+			(vacrelstats.tupcount_pages < vacrelstats.rel_pages);
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, stats, lps, nindexes);
 	}
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_cleanup_index(Irel[idx], &stats[idx],
-							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+							   vacrelstats.new_rel_tuples,
+							   vacrelstats.tupcount_pages < vacrelstats.rel_pages);
 	}
 }
 
@@ -2414,17 +2410,17 @@ lazy_cleanup_index(Relation indrel,
  * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
  */
 static bool
-should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
+should_attempt_truncation(VacuumParams *params)
 {
 	BlockNumber possibly_freeable;
 
 	if (params->truncate == VACOPT_TERNARY_DISABLED)
 		return false;
 
-	possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
+	possibly_freeable = vacrelstats.rel_pages - vacrelstats.nonempty_pages;
 	if (possibly_freeable > 0 &&
 		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
-		 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
+		 possibly_freeable >= vacrelstats.rel_pages / REL_TRUNCATE_FRACTION) &&
 		old_snapshot_threshold < 0)
 		return true;
 	else
@@ -2435,9 +2431,9 @@ should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
  * lazy_truncate_heap - try to truncate off any empty pages at the end
  */
 static void
-lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_truncate_heap(Relation onerel)
 {
-	BlockNumber old_rel_pages = vacrelstats->rel_pages;
+	BlockNumber old_rel_pages = vacrelstats.rel_pages;
 	BlockNumber new_rel_pages;
 	PGRUsage	ru0;
 	int			lock_retry;
@@ -2460,7 +2456,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * (which is quite possible considering we already hold a lower-grade
 		 * lock).
 		 */
-		vacrelstats->lock_waiter_detected = false;
+		vacrelstats.lock_waiter_detected = false;
 		lock_retry = 0;
 		while (true)
 		{
@@ -2480,7 +2476,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 				 * We failed to establish the lock in the specified number of
 				 * retries. This means we give up truncating.
 				 */
-				vacrelstats->lock_waiter_detected = true;
+				vacrelstats.lock_waiter_detected = true;
 				ereport(elevel,
 						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
 								RelationGetRelationName(onerel))));
@@ -2499,7 +2495,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		if (new_rel_pages != old_rel_pages)
 		{
 			/*
-			 * Note: we intentionally don't update vacrelstats->rel_pages with
+			 * Note: we intentionally don't update vacrelstats.rel_pages with
 			 * the new rel size here.  If we did, it would amount to assuming
 			 * that the new pages are empty, which is unlikely. Leaving the
 			 * numbers alone amounts to assuming that the new pages have the
@@ -2515,7 +2511,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * other backends could have added tuples to these pages whilst we
 		 * were vacuuming.
 		 */
-		new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
+		new_rel_pages = count_nondeletable_pages(onerel);
 
 		if (new_rel_pages >= old_rel_pages)
 		{
@@ -2543,8 +2539,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * without also touching reltuples, since the tuple count wasn't
 		 * changed by the truncation.
 		 */
-		vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
-		vacrelstats->rel_pages = new_rel_pages;
+		vacrelstats.pages_removed += old_rel_pages - new_rel_pages;
+		vacrelstats.rel_pages = new_rel_pages;
 
 		ereport(elevel,
 				(errmsg("\"%s\": truncated %u to %u pages",
@@ -2553,8 +2549,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 				 errdetail_internal("%s",
 									pg_rusage_show(&ru0))));
 		old_rel_pages = new_rel_pages;
-	} while (new_rel_pages > vacrelstats->nonempty_pages &&
-			 vacrelstats->lock_waiter_detected);
+	} while (new_rel_pages > vacrelstats.nonempty_pages &&
+			 vacrelstats.lock_waiter_detected);
 }
 
 /*
@@ -2563,7 +2559,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
  * Returns number of nondeletable pages (last nonempty page + 1).
  */
 static BlockNumber
-count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
+count_nondeletable_pages(Relation onerel)
 {
 	BlockNumber blkno;
 	BlockNumber prefetchedUntil;
@@ -2578,11 +2574,11 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	 * unsigned.)  To make the scan faster, we prefetch a few blocks at a time
 	 * in forward direction, so that OS-level readahead can kick in.
 	 */
-	blkno = vacrelstats->rel_pages;
+	blkno = vacrelstats.rel_pages;
 	StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
 					 "prefetch size must be power of 2");
 	prefetchedUntil = InvalidBlockNumber;
-	while (blkno > vacrelstats->nonempty_pages)
+	while (blkno > vacrelstats.nonempty_pages)
 	{
 		Buffer		buf;
 		Page		page;
@@ -2615,7 +2611,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
 									RelationGetRelationName(onerel))));
 
-					vacrelstats->lock_waiter_detected = true;
+					vacrelstats.lock_waiter_detected = true;
 					return blkno;
 				}
 				starttime = currenttime;
@@ -2695,7 +2691,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	 * pages still are; we need not bother to look at the last known-nonempty
 	 * page.
 	 */
-	return vacrelstats->nonempty_pages;
+	return vacrelstats.nonempty_pages;
 }
 
 /*
@@ -2734,18 +2730,18 @@ compute_max_dead_tuples(BlockNumber relblocks, bool useindex)
  * See the comments at the head of this file for rationale.
  */
 static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
+lazy_space_alloc(BlockNumber relblocks)
 {
 	LVDeadTuples *dead_tuples = NULL;
 	long		maxtuples;
 
-	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->useindex);
+	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats.useindex);
 
 	dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
 	dead_tuples->num_tuples = 0;
 	dead_tuples->max_tuples = (int) maxtuples;
 
-	vacrelstats->dead_tuples = dead_tuples;
+	vacrelstats.dead_tuples = dead_tuples;
 }
 
 /*
@@ -3063,7 +3059,7 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
  * create a parallel context, and then initialize the DSM segment.
  */
 static LVParallelState *
-begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
+begin_parallel_vacuum(Oid relid, Relation *Irel,
 					  BlockNumber nblocks, int nindexes, int nrequested)
 {
 	LVParallelState *lps = NULL;
@@ -3185,7 +3181,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
 	dead_tuples->num_tuples = 0;
 	MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
-	vacrelstats->dead_tuples = dead_tuples;
+	vacrelstats.dead_tuples = dead_tuples;
 
 	/* Store query string for workers */
 	sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
-- 
2.7.4
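
For anyone who hasn't used this machinery: when an error is reported, elog.c
walks error_context_stack and calls each frame's callback, and each callback
contributes one CONTEXT line via errcontext().  A minimal callback of the
kind the next patch adds would look like this; whatever it dereferences has
to stay valid while the frame is on the stack, which is why the names get
saved into vacrelstats up front:

	static void
	vacuum_error_callback(void *arg)
	{
		LVRelStats *cbarg = (LVRelStats *) arg;

		errcontext("while scanning block %u of relation \"%s.%s\"",
				   cbarg->blkno, cbarg->relnamespace, cbarg->relname);
	}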

From 592a77554f99b5ff9035c55bf19a79a1443ae59e Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v14 2/3] Vacuum errcontext to show block being processed

As requested here:
https://www.postgresql.org/message-id/20190807235154.erbmr4o4bo6vgnjv%40alap3.anarazel.de
---
 src/backend/access/heap/vacuumlazy.c | 85 +++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 114428b..a62dc79 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -290,8 +290,14 @@ typedef struct LVRelStats
 	int			num_index_scans;
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
-} LVRelStats;
 
+	/* Used by the error callback */
+	char		*relname;
+	char 		*relnamespace;
+	BlockNumber blkno;
+	char 		*indname;
+	int			stage;	/* 0: scan heap; 1: vacuum heap; 2: vacuum index */
+} LVRelStats;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -360,6 +366,7 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
 static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
 
 
 /*
@@ -721,6 +728,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
 
 	pg_rusage_init(&ru0);
 
@@ -867,6 +875,17 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 	else
 		skipping_blocks = false;
 
+	/* Setup error traceback support for ereport() */
+	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats.relname = relname;
+	vacrelstats.blkno = InvalidBlockNumber; /* Not known yet */
+	vacrelstats.stage = 0;
+
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -888,6 +907,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 #define FORCE_CHECK_PAGE() \
 		(blkno == nblocks - 1 && should_attempt_truncation(params))
 
+		vacrelstats.blkno = blkno;
+
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		if (blkno == next_unskippable_block)
@@ -984,12 +1005,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 				vmbuffer = InvalidBuffer;
 			}
 
+			/* Pop the error context stack */
+			error_context_stack = errcallback.previous;
+
 			/* Work on all the indexes, then the heap */
 			lazy_vacuum_all_indexes(onerel, Irel, indstats,
 									lps, nindexes);
 			/* Remove tuples from heap */
 			lazy_vacuum_heap(onerel);
 
+			/* Replace error context while continuing heap scan */
+			error_context_stack = &errcallback;
+
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
 			 * not to reset latestRemovedXid since we want that value to be
@@ -1593,6 +1620,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 			RecordPageWithFreeSpace(onerel, blkno, freespace);
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/* report that everything is scanned and vacuumed */
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -1768,11 +1798,24 @@ lazy_vacuum_heap(Relation onerel)
 	int			npages;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
+	ErrorContextCallback errcallback;
 
 	/* Report that we are now vacuuming the heap */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
 
+	/*
+	 * Setup error traceback support for ereport().  The relnamespace and
+	 * relname fields are already set.
+	 */
+	vacrelstats.blkno = InvalidBlockNumber; /* Not known yet */
+	vacrelstats.stage = 1;
+
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	pg_rusage_init(&ru0);
 	npages = 0;
 
@@ -1787,6 +1830,7 @@ lazy_vacuum_heap(Relation onerel)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats.dead_tuples->itemptrs[tupindex]);
+		vacrelstats.blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1807,6 +1851,9 @@ lazy_vacuum_heap(Relation onerel)
 		npages++;
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (BufferIsValid(vmbuffer))
 	{
 		ReleaseBuffer(vmbuffer);
@@ -2314,6 +2361,8 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	ErrorContextCallback errcallback;
+	LVRelStats	errcbarg;	/* used only by the error callback */
 
 	pg_rusage_init(&ru0);
 
@@ -2325,10 +2374,24 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	errcbarg.relnamespace = get_namespace_name(RelationGetNamespace(indrel));
+	errcbarg.indname = RelationGetRelationName(indrel);
+	errcbarg.relname = vacrelstats.relname;
+	errcbarg.stage = 2;
+
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &errcbarg;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	/* Do bulk deletion */
 	*stats = index_bulk_delete(&ivinfo, *stats,
 							   lazy_tid_reaped, (void *) dead_tuples);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (IsParallelWorker())
 		msg = gettext_noop("scanned index \"%s\" to remove %d row versions by parallel vacuum worker");
 	else
@@ -3371,3 +3434,22 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	table_close(onerel, ShareUpdateExclusiveLock);
 	pfree(stats);
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	LVRelStats *cbarg = (LVRelStats *) arg;
+
+	if (cbarg->stage == 0)
+		errcontext(_("while scanning block %u of relation \"%s.%s\""),
+				cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+	else if (cbarg->stage == 1)
+		errcontext(_("while vacuuming block %u of relation \"%s.%s\""),
+				cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+	else if (cbarg->stage == 2)
+		errcontext(_("while vacuuming index \"%s\" on table \"%s.%s\""),
+				cbarg->indname, cbarg->relnamespace, cbarg->relname);
+}
-- 
2.7.4
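
If the bare 0/1/2 stage values seem too magic, a later revision could switch
to an enum, something like (untested sketch, names made up here):

	typedef enum
	{
		VACUUM_ERRCB_PHASE_SCAN_HEAP,	/* stage 0 */
		VACUUM_ERRCB_PHASE_VACUUM_HEAP,	/* stage 1 */
		VACUUM_ERRCB_PHASE_VACUUM_INDEX	/* stage 2 */
	} VacErrCbPhase;

with vacrelstats.stage and the callback's comparisons switched to the enum
names.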

From 7c76861addf213b3b417d783b04b12112fc468f1 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Mon, 20 Jan 2020 15:26:39 -0600
Subject: [PATCH v14 3/3] Avoid extra calls like RelationGetRelationName()

---
 src/backend/access/heap/vacuumlazy.c | 75 ++++++++++++++++++------------------
 1 file changed, 37 insertions(+), 38 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index a62dc79..1841e7c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -270,6 +270,9 @@ typedef struct LVParallelState
 
 typedef struct LVRelStats
 {
+	char		*relname;
+	char 		*relnamespace;
+
 	/* useindex = true means two-pass strategy; false means one-pass */
 	bool		useindex;
 	/* Overall statistics about rel */
@@ -292,8 +295,6 @@ typedef struct LVRelStats
 	bool		lock_waiter_detected;
 
 	/* Used by the error callback */
-	char		*relname;
-	char 		*relnamespace;
 	BlockNumber blkno;
 	char 		*indname;
 	int			stage;	/* 0: scan heap; 1: vacuum heap; 2: vacuum index */
@@ -603,8 +604,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 			}
 			appendStringInfo(&buf, msgfmt,
 							 get_database_name(MyDatabaseId),
-							 get_namespace_name(RelationGetNamespace(onerel)),
-							 RelationGetRelationName(onerel),
+							 vacrelstats.relnamespace,
+							 vacrelstats.relname,
 							 vacrelstats.num_index_scans);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
 							 vacrelstats.pages_removed,
@@ -703,7 +704,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 	BlockNumber nblocks,
 				blkno;
 	HeapTupleData tuple;
-	char	   *relname;
 	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
 	TransactionId relminmxid = onerel->rd_rel->relminmxid;
 	BlockNumber empty_pages,
@@ -732,18 +732,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 
 	pg_rusage_init(&ru0);
 
-	relname = RelationGetRelationName(onerel);
-	if (aggressive)
-		ereport(elevel,
-				(errmsg("aggressively vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
-	else
-		ereport(elevel,
-				(errmsg("vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
-
 	empty_pages = vacuumed_pages = 0;
 	next_fsm_block_to_vacuum = (BlockNumber) 0;
 	num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
@@ -758,6 +746,28 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 	vacrelstats.nonempty_pages = 0;
 	vacrelstats.latestRemovedXid = InvalidTransactionId;
 
+	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats.relname = RelationGetRelationName(onerel);
+	vacrelstats.blkno = InvalidBlockNumber; /* Not known yet */
+	vacrelstats.stage = 0;
+
+	if (aggressive)
+		ereport(elevel,
+				(errmsg("aggressively vacuuming \"%s.%s\"",
+						vacrelstats.relnamespace,
+						vacrelstats.relname)));
+	else
+		ereport(elevel,
+				(errmsg("vacuuming \"%s.%s\"",
+						vacrelstats.relnamespace,
+						vacrelstats.relname)));
+
+	/* Setup error traceback support for ereport() */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = (void *) &vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	/*
 	 * Initialize the state for a parallel vacuum.  As of now, only one worker
 	 * can be used for an index, so we invoke parallelism only if there are at
@@ -778,7 +788,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 			if (params->nworkers > 0)
 				ereport(WARNING,
 						(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
-								RelationGetRelationName(onerel))));
+								vacrelstats.relname)));
 		}
 		else
 			lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
@@ -875,17 +885,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 	else
 		skipping_blocks = false;
 
-	/* Setup error traceback support for ereport() */
-	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
-	vacrelstats.relname = relname;
-	vacrelstats.blkno = InvalidBlockNumber; /* Not known yet */
-	vacrelstats.stage = 0;
-
-	errcallback.callback = vacuum_error_callback;
-	errcallback.arg = (void *) &vacrelstats;
-	errcallback.previous = error_context_stack;
-	error_context_stack = &errcallback;
-
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -1557,7 +1556,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 				 && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
 		{
 			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats.relname, blkno);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
 								VISIBILITYMAP_VALID_BITS);
 		}
@@ -1578,7 +1577,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 		else if (PageIsAllVisible(page) && has_dead_tuples)
 		{
 			elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats.relname, blkno);
 			PageClearAllVisible(page);
 			MarkBufferDirty(buf);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
@@ -1691,7 +1690,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 	if (vacuumed_pages)
 		ereport(elevel,
 				(errmsg("\"%s\": removed %.0f row versions in %u pages",
-						RelationGetRelationName(onerel),
+						vacrelstats.relname,
 						tups_vacuumed, vacuumed_pages)));
 
 	/*
@@ -1720,7 +1719,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params,
 
 	ereport(elevel,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
-					RelationGetRelationName(onerel),
+					vacrelstats.relname,
 					tups_vacuumed, num_tuples,
 					vacrelstats.scanned_pages, nblocks),
 			 errdetail_internal("%s", buf.data)));
@@ -1862,7 +1861,7 @@ lazy_vacuum_heap(Relation onerel)
 
 	ereport(elevel,
 			(errmsg("\"%s\": removed %d row versions in %d pages",
-					RelationGetRelationName(onerel),
+					vacrelstats.relname,
 					tupindex, npages),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 }
@@ -2399,7 +2398,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	ereport(elevel,
 			(errmsg(msg,
-					RelationGetRelationName(indrel),
+					errcbarg.indname,
 					dead_tuples->num_tuples),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 }
@@ -2542,7 +2541,7 @@ lazy_truncate_heap(Relation onerel)
 				vacrelstats.lock_waiter_detected = true;
 				ereport(elevel,
 						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
-								RelationGetRelationName(onerel))));
+								vacrelstats.relname)));
 				return;
 			}
 
@@ -2607,7 +2606,7 @@ lazy_truncate_heap(Relation onerel)
 
 		ereport(elevel,
 				(errmsg("\"%s\": truncated %u to %u pages",
-						RelationGetRelationName(onerel),
+						vacrelstats.relname,
 						old_rel_pages, new_rel_pages),
 				 errdetail_internal("%s",
 									pg_rusage_show(&ru0))));
@@ -2672,7 +2671,7 @@ count_nondeletable_pages(Relation onerel)
 				{
 					ereport(elevel,
 							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
-									RelationGetRelationName(onerel))));
+									vacrelstats.relname)));
 
 					vacrelstats.lock_waiter_detected = true;
 					return blkno;
-- 
2.7.4
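
With all three patches applied, the session from the top of the thread
should now end with something like (I haven't re-run this exact session):

	postgres=# SET statement_timeout=99; VACUUM (VERBOSE, PARALLEL 0) t;
	ERROR:  canceling statement due to statement timeout
	CONTEXT:  while vacuuming index "t_a_idx" on table "public.t"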
