On Mon, Feb 17, 2020 at 02:18:11PM +0900, Masahiko Sawada wrote:
> Oops it seems to me that it's better to set error context after
> tupindex = 0. Sorry for my bad.

I take your point, but I did it differently - see what you think.

> And the above comment can be written in a single line as other same comments.

Thanks :)

> Hmm I don't think it's a good idea to have count_nondeletable_pages
> set the error context of PHASE_TRUNCATE.

I think if we don't do it there, then we shouldn't bother handling
PHASE_TRUNCATE at all, since that's what's likely to hit filesystem or other
low-level errors, before lazy_truncate_heap() hits them.

> Because the patch sets the
> error context during RelationTruncate that actually truncates the heap
> but count_nondeletable_pages doesn't truncate it.

I would say that ReadBuffer called by the prefetch in
count_nondeletable_pages() is called during the course of truncation, the same
as ReadBuffer called during the course of vacuuming can be attributed to
vacuuming.

> I think setting the error context only during RelationTruncate would be a
> good start. We can hear other opinions from other hackers. Some hackers may
> want to set the error context for whole lazy_truncate_heap.

I avoided doing that since it has several "return" statements, each of which
would need to "Pop the error context stack", which is at risk of being
forgotten and left unpopped by anyone who adds or changes flow control.

Also, I just added this to the TRUNCATE case, even though that should never
happen: if (BlockNumberIsValid(cbarg->blkno))...

-- 
Justin
>From 977b1b5e00ce522bd775cf91f7a9c7a9345d3171 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v20 1/2] vacuum errcontext to show block being processed

Discussion:
https://www.postgresql.org/message-id/20191120210600.gc30...@telsasoft.com
---
 src/backend/access/heap/vacuumlazy.c | 130 ++++++++++++++++++++++++++++++++++-
 1 file changed, 129 insertions(+), 1 deletion(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index a23cdef..ce3efd7 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -292,6 +292,14 @@ typedef struct LVRelStats
 	bool		lock_waiter_detected;
 } LVRelStats;
 
+typedef struct
+{
+	char 		*relnamespace;
+	char		*relname;
+	char 		*indname;
+	BlockNumber blkno;	/* used only for heap operations */
+	int			phase;	/* Reusing same enums as for progress reporting */
+} vacuum_error_callback_arg;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -361,6 +369,9 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
 static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
+static void init_vacuum_error_callback(ErrorContextCallback *errcallback,
+		vacuum_error_callback_arg *errcbarg, Relation onerel, int phase);
 
 
 /*
@@ -724,6 +735,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -870,6 +883,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	else
 		skipping_blocks = false;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+					PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -891,6 +909,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 #define FORCE_CHECK_PAGE() \
 		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 
+		errcbarg.blkno = blkno;
+
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		if (blkno == next_unskippable_block)
@@ -987,6 +1007,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				vmbuffer = InvalidBuffer;
 			}
 
+			/* Pop the error context stack while calling vacuum */
+			error_context_stack = errcallback.previous;
+
 			/* Work on all the indexes, then the heap */
 			lazy_vacuum_all_indexes(onerel, Irel, indstats,
 									vacrelstats, lps, nindexes);
@@ -1011,6 +1034,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			/* Report that we are once again scanning the heap */
 			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 										 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+
+			/* Set the error context while continuing heap scan */
+			error_context_stack = &errcallback;
 		}
 
 		/*
@@ -1597,6 +1623,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			RecordPageWithFreeSpace(onerel, blkno, freespace);
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/* report that everything is scanned and vacuumed */
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -1772,14 +1801,21 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	int			npages;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	/* Report that we are now vacuuming the heap */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
 
 	pg_rusage_init(&ru0);
-	npages = 0;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
+	error_context_stack = &errcallback;
+
+	npages = 0;
 	tupindex = 0;
 	while (tupindex < vacrelstats->dead_tuples->num_tuples)
 	{
@@ -1791,6 +1827,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		errcbarg.blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1811,6 +1848,9 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		npages++;
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (BufferIsValid(vmbuffer))
 	{
 		ReleaseBuffer(vmbuffer);
@@ -2318,6 +2358,8 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -2329,10 +2371,18 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, indrel,
+			PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
+	error_context_stack = &errcallback;
+
 	/* Do bulk deletion */
 	*stats = index_bulk_delete(&ivinfo, *stats,
 							   lazy_tid_reaped, (void *) dead_tuples);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (IsParallelWorker())
 		msg = gettext_noop("scanned index \"%s\" to remove %d row versions by parallel vacuum worker");
 	else
@@ -2359,6 +2409,8 @@ lazy_cleanup_index(Relation indrel,
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	vacuum_error_callback_arg errcbarg;
+	ErrorContextCallback errcallback;
 
 	pg_rusage_init(&ru0);
 
@@ -2371,8 +2423,16 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, indrel,
+			PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
+	error_context_stack = &errcallback;
+
 	*stats = index_vacuum_cleanup(&ivinfo, *stats);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (!(*stats))
 		return;
 
@@ -3375,3 +3435,71 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	table_close(onerel, ShareUpdateExclusiveLock);
 	pfree(stats);
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	vacuum_error_callback_arg *cbarg = arg;
+
+	switch (cbarg->phase) {
+		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while scanning block %u of relation \"%s.%s\""),
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while vacuuming block %u of relation \"%s.%s\""),
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_INDEX:
+			errcontext(_("while vacuuming index \"%s.%s\" of relation \"%s\""),
+					cbarg->relnamespace, cbarg->indname, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_INDEX_CLEANUP:
+			errcontext(_("while cleaning up index \"%s.%s\" of relation \"%s\""),
+					cbarg->relnamespace, cbarg->indname, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
+		default:
+			return; /* Shouldn't happen: do nothing */
+	}
+}
+
+/* Initialize vacuum error callback */
+static void
+init_vacuum_error_callback(ErrorContextCallback *errcallback,
+				vacuum_error_callback_arg *errcbarg, Relation rel, int phase)
+{
+	switch (phase)
+	{
+		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
+		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+			errcbarg->relname = RelationGetRelationName(rel);
+			errcbarg->indname = NULL; /* Not used for heap */
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_INDEX:
+		case PROGRESS_VACUUM_PHASE_INDEX_CLEANUP:
+			/* rel is an index relation in index vacuum case */
+			errcbarg->relname = get_rel_name(rel->rd_index->indrelid);
+			errcbarg->indname = RelationGetRelationName(rel);
+			break;
+	}
+
+	errcbarg->relnamespace = get_namespace_name(RelationGetNamespace(rel));
+	errcbarg->blkno = InvalidBlockNumber; /* Not known yet */
+	errcbarg->phase = phase;
+
+	errcallback->callback = vacuum_error_callback;
+	errcallback->arg = errcbarg;
+	errcallback->previous = error_context_stack;
+}
-- 
2.7.4

>From 02b2b4f06578bf649f7574314cb55744de7ddd63 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 16 Feb 2020 20:25:13 -0600
Subject: [PATCH v20 2/2] add callback for truncation

---
 src/backend/access/heap/vacuumlazy.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index ce3efd7..518a8a0 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2501,9 +2501,15 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 	BlockNumber new_rel_pages;
 	PGRUsage	ru0;
 	int			lock_retry;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_TRUNCATE);
+
 	/* Report that we are now truncating */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_TRUNCATE);
@@ -2584,11 +2590,18 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 			return;
 		}
 
+		/* Setup error traceback support for ereport() */
+		errcbarg.blkno = new_rel_pages;
+		error_context_stack = &errcallback;
+
 		/*
 		 * Okay to truncate.
 		 */
 		RelationTruncate(onerel, new_rel_pages);
 
+		/* Pop the error context stack */
+		error_context_stack = errcallback.previous;
+
 		/*
 		 * We can release the exclusive lock as soon as we have truncated.
 		 * Other backends can't safely access the relation until they have
@@ -2628,6 +2641,12 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	BlockNumber blkno;
 	BlockNumber prefetchedUntil;
 	instr_time	starttime;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
+
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_TRUNCATE);
 
 	/* Initialize the starttime if we check for conflicting lock requests */
 	INSTR_TIME_SET_CURRENT(starttime);
@@ -2691,6 +2710,10 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 
 		blkno--;
 
+		/* Setup error traceback support for ereport() */
+		errcbarg.blkno = blkno;
+		error_context_stack = &errcallback;
+
 		/* If we haven't prefetched this lot yet, do so now. */
 		if (prefetchedUntil > blkno)
 		{
@@ -2709,6 +2732,9 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
 								 RBM_NORMAL, vac_strategy);
 
+		/* Pop the error context stack */
+		error_context_stack = errcallback.previous;
+
 		/* In this phase we only need shared access to the buffer */
 		LockBuffer(buf, BUFFER_LOCK_SHARE);
 
@@ -3468,6 +3494,11 @@ vacuum_error_callback(void *arg)
 			break;
 
 		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while truncating relation \"%s.%s\" to %u blocks"),
+						cbarg->relnamespace, cbarg->relname, cbarg->blkno);
+			break;
+
 		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
 		default:
 			return; /* Shouldn't happen: do nothing */
@@ -3483,6 +3514,7 @@ init_vacuum_error_callback(ErrorContextCallback *errcallback,
 	{
 		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
 		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+		case PROGRESS_VACUUM_PHASE_TRUNCATE:
 			errcbarg->relname = RelationGetRelationName(rel);
 			errcbarg->indname = NULL; /* Not used for heap */
 			break;
-- 
2.7.4

Reply via email to