On Tue, Dec 24, 2019 at 01:19:09PM +0900, Michael Paquier wrote: > On Mon, Dec 23, 2019 at 07:24:28PM -0600, Justin Pryzby wrote: > > I renamed. > > Hmm. I have found what was partially itching me for patch 0002, and > that's actually the fact that we don't do the error reporting for heap > within lazy_vacuum_heap() because the code relies too much on updating > two progress parameters at the same time, on top of the fact that you > are mixing multiple concepts with this refactoring. One problem is > that if this code is refactored in the future, future callers of > lazy_vacuum_heap() would miss the update of the progress reporting. > Splitting things improves also the readability of the code, so > attached is the refactoring I would do for this portion of the set. > It is also more natural to increment num_index_scans when the
I agree that's better. I don't see any reason why the progress params need to be updated atomically. So I rebased against your patch.
>From 5317d9f3cee163762563f2255f1dd26800ea858b Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Wed, 27 Nov 2019 20:07:10 -0600 Subject: [PATCH v7 1/6] Rename buf to avoid shadowing buf of another type --- src/backend/access/heap/vacuumlazy.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index ab09d84..3f52278 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -517,7 +517,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, BlockNumber next_unskippable_block; bool skipping_blocks; xl_heap_freeze_tuple *frozen; - StringInfoData buf; + StringInfoData sbuf; const int initprog_index[] = { PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_TOTAL_HEAP_BLKS, @@ -1479,33 +1479,33 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, * This is pretty messy, but we split it up so that we can skip emitting * individual parts of the message when not applicable. 
*/ - initStringInfo(&buf); - appendStringInfo(&buf, + initStringInfo(&sbuf); + appendStringInfo(&sbuf, _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"), nkeep, OldestXmin); - appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"), + appendStringInfo(&sbuf, _("There were %.0f unused item identifiers.\n"), nunused); - appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ", + appendStringInfo(&sbuf, ngettext("Skipped %u page due to buffer pins, ", "Skipped %u pages due to buffer pins, ", vacrelstats->pinskipped_pages), vacrelstats->pinskipped_pages); - appendStringInfo(&buf, ngettext("%u frozen page.\n", + appendStringInfo(&sbuf, ngettext("%u frozen page.\n", "%u frozen pages.\n", vacrelstats->frozenskipped_pages), vacrelstats->frozenskipped_pages); - appendStringInfo(&buf, ngettext("%u page is entirely empty.\n", + appendStringInfo(&sbuf, ngettext("%u page is entirely empty.\n", "%u pages are entirely empty.\n", empty_pages), empty_pages); - appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0)); + appendStringInfo(&sbuf, _("%s."), pg_rusage_show(&ru0)); ereport(elevel, (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages", RelationGetRelationName(onerel), tups_vacuumed, num_tuples, vacrelstats->scanned_pages, nblocks), - errdetail_internal("%s", buf.data))); - pfree(buf.data); + errdetail_internal("%s", sbuf.data))); + pfree(sbuf.data); } -- 2.7.4
>From c04d311358fe483bee4ccdfabf4a83f7f1d978b4 Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Mon, 23 Dec 2019 22:42:28 -0600 Subject: [PATCH v7 2/6] michael dedup --- src/backend/access/heap/vacuumlazy.c | 96 ++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 53 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 3f52278..36c92f8 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -158,6 +158,9 @@ static bool lazy_check_needs_freeze(Buffer buf, bool *hastup); static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats); +static void lazy_vacuum_all_indexes(Relation onerel, LVRelStats *vacrelstats, + Relation *Irel, int nindexes, + IndexBulkDeleteResult **indstats); static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats); @@ -740,12 +743,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage && vacrelstats->num_dead_tuples > 0) { - const int hvp_index[] = { - PROGRESS_VACUUM_PHASE, - PROGRESS_VACUUM_NUM_INDEX_VACUUMS - }; - int64 hvp_val[2]; - /* * Before beginning index vacuuming, we release any pin we may * hold on the visibility map page. This isn't necessary for @@ -758,28 +755,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, vmbuffer = InvalidBuffer; } - /* Log cleanup info before we touch indexes */ - vacuum_log_cleanup_info(onerel, vacrelstats); - - /* Report that we are now vacuuming indexes */ - pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, - PROGRESS_VACUUM_PHASE_VACUUM_INDEX); - - /* Remove index entries */ - for (i = 0; i < nindexes; i++) - lazy_vacuum_index(Irel[i], - &indstats[i], - vacrelstats); - - /* - * Report that we are now vacuuming the heap. 
We also increase - * the number of index scans here; note that by using - * pgstat_progress_update_multi_param we can update both - * parameters atomically. - */ - hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP; - hvp_val[1] = vacrelstats->num_index_scans + 1; - pgstat_progress_update_multi_param(2, hvp_index, hvp_val); + /* Work on all the indexes, then the heap */ + lazy_vacuum_all_indexes(onerel, vacrelstats, Irel, + nindexes, indstats); /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); @@ -790,7 +768,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, * valid. */ vacrelstats->num_dead_tuples = 0; - vacrelstats->num_index_scans++; /* * Vacuum the Free Space Map to make newly-freed space visible on @@ -1420,33 +1397,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, /* XXX put a threshold on min number of tuples here? */ if (vacrelstats->num_dead_tuples > 0) { - const int hvp_index[] = { - PROGRESS_VACUUM_PHASE, - PROGRESS_VACUUM_NUM_INDEX_VACUUMS - }; - int64 hvp_val[2]; - - /* Log cleanup info before we touch indexes */ - vacuum_log_cleanup_info(onerel, vacrelstats); - - /* Report that we are now vacuuming indexes */ - pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, - PROGRESS_VACUUM_PHASE_VACUUM_INDEX); - - /* Remove index entries */ - for (i = 0; i < nindexes; i++) - lazy_vacuum_index(Irel[i], - &indstats[i], - vacrelstats); - - /* Report that we are now vacuuming the heap */ - hvp_val[0] = PROGRESS_VACUUM_PHASE_VACUUM_HEAP; - hvp_val[1] = vacrelstats->num_index_scans + 1; - pgstat_progress_update_multi_param(2, hvp_index, hvp_val); + /* Work on all the indexes, and then the heap */ + lazy_vacuum_all_indexes(onerel, vacrelstats, Irel, nindexes, + indstats); /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); - vacrelstats->num_index_scans++; } /* @@ -1508,6 +1464,36 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, 
pfree(sbuf.data); } +/* + * lazy_vacuum_all_indexes() -- vacuum all indexes of relation. + * + * This is a utility wrapper for lazy_vacuum_index(), able to do + * progress reporting. + */ +static void +lazy_vacuum_all_indexes(Relation onerel, LVRelStats *vacrelstats, + Relation *Irel, int nindexes, + IndexBulkDeleteResult **indstats) +{ + int i; + + /* Log cleanup info before we touch indexes */ + vacuum_log_cleanup_info(onerel, vacrelstats); + + /* Report that we are now vacuuming indexes */ + pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, + PROGRESS_VACUUM_PHASE_VACUUM_INDEX); + + /* Remove index entries */ + for (i = 0; i < nindexes; i++) + lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats); + + /* Increase and report the number of index scans */ + vacrelstats->num_index_scans++; + pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS, + vacrelstats->num_index_scans); +} + /* * lazy_vacuum_heap() -- second pass over the heap @@ -1528,6 +1514,10 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats) PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; + /* Report that we are now vacuuming the heap */ + pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, + PROGRESS_VACUUM_PHASE_VACUUM_HEAP); + pg_rusage_init(&ru0); npages = 0; -- 2.7.4
>From 33dff2cb39a9d0f2f7e1327ba9f3abfb0ea382f0 Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Mon, 23 Dec 2019 14:38:01 -0600 Subject: [PATCH v7 3/6] dedup2: skip_blocks --- src/backend/access/heap/vacuumlazy.c | 187 ++++++++++++++++------------------- 1 file changed, 84 insertions(+), 103 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 36c92f8..4c28876 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -480,6 +480,88 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats) } /* + * Return whether skipping blocks or not. + * Except when aggressive is set, we want to skip pages that are + * all-visible according to the visibility map, but only when we can skip + * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading + * sequentially, the OS should be doing readahead for us, so there's no + * gain in skipping a page now and then; that's likely to disable + * readahead and so be counterproductive. Also, skipping even a single + * page means that we can't update relfrozenxid, so we only want to do it + * if we can skip a goodly number of pages. + * + * When aggressive is set, we can't skip pages just because they are + * all-visible, but we can still skip pages that are all-frozen, since + * such pages do not need freezing and do not affect the value that we can + * safely set for relfrozenxid or relminmxid. + * + * Before entering the main loop, establish the invariant that + * next_unskippable_block is the next block number >= blkno that we can't + * skip based on the visibility map, either all-visible for a regular scan + * or all-frozen for an aggressive scan. We set it to nblocks if there's + * no such block. We also set up the skipping_blocks flag correctly at + * this stage. 
+ * + * Note: The value returned by visibilitymap_get_status could be slightly + * out-of-date, since we make this test before reading the corresponding + * heap page or locking the buffer. This is OK. If we mistakenly think + * that the page is all-visible or all-frozen when in fact the flag's just + * been cleared, we might fail to vacuum the page. It's easy to see that + * skipping a page when aggressive is not set is not a very big deal; we + * might leave some dead tuples lying around, but the next vacuum will + * find them. But even when aggressive *is* set, it's still OK if we miss + * a page whose all-frozen marking has just been cleared. Any new XIDs + * just added to that page are necessarily newer than the GlobalXmin we + * computed, so they'll have no effect on the value to which we can safely + * set relfrozenxid. A similar argument applies for MXIDs and relminmxid. + * + * We will scan the table's last page, at least to the extent of + * determining whether it has tuples or not, even if it should be skipped + * according to the above rules; except when we've already determined that + * it's not worth trying to truncate the table. This avoids having + * lazy_truncate_heap() take access-exclusive lock on the table to attempt + * a truncation that just fails immediately because there are tuples in + * the last page. This is worth avoiding mainly because such a lock must + * be replayed on any hot standby, where it can be disruptive. 
+ */ +static int +skip_blocks(Relation onerel, VacuumParams *params, BlockNumber *next_unskippable_block, BlockNumber nblocks, Buffer *vmbuffer, bool aggressive) +{ + if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0) + { + while (*next_unskippable_block < nblocks) + { + uint8 vmstatus; + + vmstatus = visibilitymap_get_status(onerel, *next_unskippable_block, + vmbuffer); + if (aggressive) + { + if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0) + break; + } + else + { + if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0) + break; + } + vacuum_delay_point(); + ++*next_unskippable_block; + } + } + + + /* + * We know we can't skip the current block. But set up + * skipping_blocks to do the right thing at the following blocks. + */ + if (*next_unskippable_block >= SKIP_PAGES_THRESHOLD) + return true; + else + return false; +} + +/* * lazy_scan_heap() -- scan an open heap relation * * This routine prunes each page in the heap, which will among other @@ -565,78 +647,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, initprog_val[2] = vacrelstats->max_dead_tuples; pgstat_progress_update_multi_param(3, initprog_index, initprog_val); - /* - * Except when aggressive is set, we want to skip pages that are - * all-visible according to the visibility map, but only when we can skip - * at least SKIP_PAGES_THRESHOLD consecutive pages. Since we're reading - * sequentially, the OS should be doing readahead for us, so there's no - * gain in skipping a page now and then; that's likely to disable - * readahead and so be counterproductive. Also, skipping even a single - * page means that we can't update relfrozenxid, so we only want to do it - * if we can skip a goodly number of pages. - * - * When aggressive is set, we can't skip pages just because they are - * all-visible, but we can still skip pages that are all-frozen, since - * such pages do not need freezing and do not affect the value that we can - * safely set for relfrozenxid or relminmxid. 
- * - * Before entering the main loop, establish the invariant that - * next_unskippable_block is the next block number >= blkno that we can't - * skip based on the visibility map, either all-visible for a regular scan - * or all-frozen for an aggressive scan. We set it to nblocks if there's - * no such block. We also set up the skipping_blocks flag correctly at - * this stage. - * - * Note: The value returned by visibilitymap_get_status could be slightly - * out-of-date, since we make this test before reading the corresponding - * heap page or locking the buffer. This is OK. If we mistakenly think - * that the page is all-visible or all-frozen when in fact the flag's just - * been cleared, we might fail to vacuum the page. It's easy to see that - * skipping a page when aggressive is not set is not a very big deal; we - * might leave some dead tuples lying around, but the next vacuum will - * find them. But even when aggressive *is* set, it's still OK if we miss - * a page whose all-frozen marking has just been cleared. Any new XIDs - * just added to that page are necessarily newer than the GlobalXmin we - * computed, so they'll have no effect on the value to which we can safely - * set relfrozenxid. A similar argument applies for MXIDs and relminmxid. - * - * We will scan the table's last page, at least to the extent of - * determining whether it has tuples or not, even if it should be skipped - * according to the above rules; except when we've already determined that - * it's not worth trying to truncate the table. This avoids having - * lazy_truncate_heap() take access-exclusive lock on the table to attempt - * a truncation that just fails immediately because there are tuples in - * the last page. This is worth avoiding mainly because such a lock must - * be replayed on any hot standby, where it can be disruptive. 
- */ next_unskippable_block = 0; - if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0) - { - while (next_unskippable_block < nblocks) - { - uint8 vmstatus; - - vmstatus = visibilitymap_get_status(onerel, next_unskippable_block, - &vmbuffer); - if (aggressive) - { - if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0) - break; - } - else - { - if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0) - break; - } - vacuum_delay_point(); - next_unskippable_block++; - } - } - - if (next_unskippable_block >= SKIP_PAGES_THRESHOLD) - skipping_blocks = true; - else - skipping_blocks = false; + skipping_blocks = skip_blocks(onerel, params, &next_unskippable_block, nblocks, &vmbuffer, aggressive); for (blkno = 0; blkno < nblocks; blkno++) { @@ -665,38 +677,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, { /* Time to advance next_unskippable_block */ next_unskippable_block++; - if ((params->options & VACOPT_DISABLE_PAGE_SKIPPING) == 0) - { - while (next_unskippable_block < nblocks) - { - uint8 vmskipflags; - - vmskipflags = visibilitymap_get_status(onerel, - next_unskippable_block, - &vmbuffer); - if (aggressive) - { - if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0) - break; - } - else - { - if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0) - break; - } - vacuum_delay_point(); - next_unskippable_block++; - } - } - - /* - * We know we can't skip the current block. But set up - * skipping_blocks to do the right thing at the following blocks. - */ - if (next_unskippable_block - blkno > SKIP_PAGES_THRESHOLD) - skipping_blocks = true; - else - skipping_blocks = false; + skipping_blocks = skip_blocks(onerel, params, &next_unskippable_block, nblocks, &vmbuffer, aggressive); /* * Normally, the fact that we can't skip this block must mean that -- 2.7.4
>From ab3c74a8d5a7bf92de761f29db7456f05b2dddee Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Thu, 12 Dec 2019 20:54:37 -0600 Subject: [PATCH v7 4/6] vacuum errcontext to show block being processed As requested here. https://www.postgresql.org/message-id/20190807235154.erbmr4o4bo6vgnjv%40alap3.anarazel.de --- src/backend/access/heap/vacuumlazy.c | 37 ++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 4c28876..fb19af4 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -138,6 +138,12 @@ typedef struct LVRelStats bool lock_waiter_detected; } LVRelStats; +typedef struct +{ + char *relname; + char *relnamespace; + BlockNumber blkno; +} vacuum_error_callback_arg; /* A few variables that don't seem worth passing around as parameters */ static int elevel = -1; @@ -178,6 +184,7 @@ static bool lazy_tid_reaped(ItemPointer itemptr, void *state); static int vac_cmp_itemptr(const void *left, const void *right); static bool heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen); +static void vacuum_error_callback(void *arg); /* @@ -609,6 +616,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, PROGRESS_VACUUM_MAX_DEAD_TUPLES }; int64 initprog_val[3]; + ErrorContextCallback errcallback; + vacuum_error_callback_arg errcbarg; pg_rusage_init(&ru0); @@ -650,6 +659,15 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, next_unskippable_block = 0; skipping_blocks = skip_blocks(onerel, params, &next_unskippable_block, nblocks, &vmbuffer, aggressive); + /* Setup error traceback support for ereport() */ + errcbarg.relnamespace = get_namespace_name(RelationGetNamespace(onerel)); + errcbarg.relname = relname; + errcbarg.blkno = InvalidBlockNumber; /* Not known yet */ + errcallback.callback = 
vacuum_error_callback; + errcallback.arg = (void *) &errcbarg; + errcallback.previous = error_context_stack; + error_context_stack = &errcallback; + for (blkno = 0; blkno < nblocks; blkno++) { Buffer buf; @@ -671,6 +689,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, #define FORCE_CHECK_PAGE() \ (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats)) + errcbarg.blkno = blkno; + pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno); if (blkno == next_unskippable_block) @@ -737,8 +757,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, } /* Work on all the indexes, then the heap */ + /* Don't use the errcontext handler outside this function */ + error_context_stack = errcallback.previous; lazy_vacuum_all_indexes(onerel, vacrelstats, Irel, nindexes, indstats); + error_context_stack = &errcallback; /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); @@ -1346,6 +1369,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, RecordPageWithFreeSpace(onerel, blkno, freespace); } + /* Pop the error context stack */ + error_context_stack = errcallback.previous; + /* report that everything is scanned and vacuumed */ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno); @@ -2323,3 +2349,14 @@ heap_page_is_all_visible(Relation rel, Buffer buf, return all_visible; } + +/* + * Error context callback for errors occurring during vacuum. + */ +static void +vacuum_error_callback(void *arg) +{ + vacuum_error_callback_arg *cbarg = arg; + errcontext("while scanning block %u of relation \"%s.%s\"", + cbarg->blkno, cbarg->relnamespace, cbarg->relname); +} -- 2.7.4
>From 2b8f8c585da708d46b2be6cf795aef15793380f7 Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Thu, 12 Dec 2019 20:34:03 -0600 Subject: [PATCH v7 5/6] add errcontext callback in lazy_vacuum_heap, too --- src/backend/access/heap/vacuumlazy.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index fb19af4..b03c8a8 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1410,6 +1410,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); + error_context_stack = errcallback.previous; } /* @@ -1521,10 +1522,22 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats) PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; + ErrorContextCallback errcallback; + vacuum_error_callback_arg errcbarg; + /* Report that we are now vacuuming the heap */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_HEAP); + /* Setup error traceback support for ereport() */ + errcbarg.relnamespace = get_namespace_name(RelationGetNamespace(onerel)); + errcbarg.relname = RelationGetRelationName(onerel); + errcbarg.blkno = InvalidBlockNumber; /* Not known yet */ + errcallback.callback = vacuum_error_callback; + errcallback.arg = (void *) &errcbarg; + errcallback.previous = error_context_stack; + error_context_stack = &errcallback; + pg_rusage_init(&ru0); npages = 0; @@ -1539,6 +1552,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats) vacuum_delay_point(); tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]); + errcbarg.blkno = tblk; buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL, vac_strategy); if (!ConditionalLockBufferForCleanup(buf)) -- 2.7.4
>From 0d9dfb06614aba2eb721bdc03960d04a06645ef1 Mon Sep 17 00:00:00 2001 From: Justin Pryzby <pryz...@telsasoft.com> Date: Sun, 15 Dec 2019 17:00:29 -0600 Subject: [PATCH v7 6/6] Print debug line before starting each vacuum step --- src/backend/access/heap/vacuumlazy.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index b03c8a8..08cb2aa 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1539,6 +1539,10 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats) error_context_stack = &errcallback; pg_rusage_init(&ru0); + + ereport(elevel, (errmsg("\"%s\": vacuuming heap", + RelationGetRelationName(onerel)))); + npages = 0; tupindex = 0; @@ -1757,6 +1761,9 @@ lazy_vacuum_index(Relation indrel, pg_rusage_init(&ru0); + ereport(elevel, (errmsg("\"%s\": vacuuming index", + RelationGetRelationName(indrel)))); + ivinfo.index = indrel; ivinfo.analyze_only = false; ivinfo.report_progress = false; @@ -1804,6 +1811,9 @@ lazy_cleanup_index(Relation indrel, ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples; ivinfo.strategy = vac_strategy; + ereport(elevel, (errmsg("cleaning up index \"%s\"", + RelationGetRelationName(indrel)))); + stats = index_vacuum_cleanup(&ivinfo, stats); if (!stats) -- 2.7.4