On Tue, Mar 24, 2020 at 09:47:30PM +0900, Masahiko Sawada wrote:
> We need to set the error context after setting new_rel_pages.

Done.

> The type name ErrCbPhase seems to be very generic name, how about
> VacErrCbPhase or VacuumErrCbPhase?

Done.

Thanks for your review comments.

-- 
Justin
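For anyone following along, the mechanism the 0001 patch relies on is the usual
push/pop of an ErrorContextCallback around the work loop, with the callback reading
a small struct that the loop keeps up to date.  A minimal sketch of that pattern is
below (illustrative only -- DemoErrCbArg, demo_error_callback and demo_process_rel
are made-up names, not the patch's; the real patch reuses LVRelStats as the callback
argument):

    #include "postgres.h"
    #include "storage/block.h"

    typedef struct DemoErrCbArg     /* hypothetical callback argument */
    {
        const char *relname;
        BlockNumber blkno;
    } DemoErrCbArg;

    /* Invoked during error reporting; appends a CONTEXT line to the message */
    static void
    demo_error_callback(void *arg)
    {
        DemoErrCbArg *cbarg = (DemoErrCbArg *) arg;

        if (BlockNumberIsValid(cbarg->blkno))
            errcontext("while processing block %u of relation \"%s\"",
                       cbarg->blkno, cbarg->relname);
    }

    static void
    demo_process_rel(const char *relname, BlockNumber nblocks)
    {
        ErrorContextCallback errcallback;
        DemoErrCbArg cbarg;
        BlockNumber blkno;

        cbarg.relname = relname;
        cbarg.blkno = InvalidBlockNumber;

        /* Push our callback onto the error context stack */
        errcallback.callback = demo_error_callback;
        errcallback.arg = &cbarg;
        errcallback.previous = error_context_stack;
        error_context_stack = &errcallback;

        for (blkno = 0; blkno < nblocks; blkno++)
        {
            cbarg.blkno = blkno;    /* keep the callback argument current */
            /* ... per-block work; any ERROR raised here gets the CONTEXT line ... */
        }

        /* Pop the error context stack */
        error_context_stack = errcallback.previous;
    }

The patch does the same thing, except that the phase/block/index fields live in
LVRelStats and are switched via update_vacuum_error_cbarg() as vacuum moves between
heap and index processing.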
>From 26c57039135896ebf29b96c172d35d869ed1ce69 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v32 1/3] Introduce vacuum errcontext to display additional
 information.

The additional information displayed is the block number for errors that occur
while processing the heap, and the index name for errors that occur while
processing an index.  This will help in diagnosing problems that occur during a
vacuum; for example, if we get an error while vacuuming due to corruption
(caused either by bad hardware or by some bug), it helps identify the affected
heap block and/or index.

It sets up an error context callback to display the additional information with
the error.  During the different phases of vacuum (heap scan, heap vacuum,
index vacuum, index cleanup, heap truncate), we update the error context
callback to display the appropriate information.  We could extend this to a
more granular level, e.g. adding phases for FSM operations or for prefetching
blocks while truncating, but that would require many more error callback
updates and would complicate the code, so those are left for now.

Author: Justin Pryzby, with a few changes by Amit Kapila
Reviewed-by: Alvaro Herrera, Amit Kapila, Andres Freund, Michael Paquier and Sawada Masahiko
Discussion: https://www.postgresql.org/message-id/20191120210600.gc30...@telsasoft.com
---
 src/backend/access/heap/vacuumlazy.c | 260 ++++++++++++++++++++++++---
 src/tools/pgindent/typedefs.list     |   1 +
 2 files changed, 236 insertions(+), 25 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 03c43efc32..cbea791968 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -144,6 +144,17 @@
  */
 #define ParallelVacuumIsActive(lps) PointerIsValid(lps)
 
+/* Phases of vacuum during which we report error context. */
+typedef enum
+{
+	VACUUM_ERRCB_PHASE_UNKNOWN,
+	VACUUM_ERRCB_PHASE_SCAN_HEAP,
+	VACUUM_ERRCB_PHASE_VACUUM_INDEX,
+	VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+	VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
+	VACUUM_ERRCB_PHASE_TRUNCATE
+} VacErrCbPhase;
+
 /*
  * LVDeadTuples stores the dead tuple TIDs collected during the heap scan.
 * This is allocated in the DSM segment in parallel mode and in local memory
@@ -270,6 +281,8 @@ typedef struct LVParallelState
 
 typedef struct LVRelStats
 {
+	char	   *relnamespace;
+	char	   *relname;
 	/* useindex = true means two-pass strategy; false means one-pass */
 	bool		useindex;
 	/* Overall statistics about rel */
@@ -290,8 +303,12 @@ typedef struct LVRelStats
 	int			num_index_scans;
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
-} LVRelStats;
 
+	/* Used for error callback */
+	char	   *indname;
+	BlockNumber blkno;			/* used only for heap operations */
+	VacErrCbPhase phase;
+} LVRelStats;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -314,10 +331,10 @@ static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-							  LVDeadTuples *dead_tuples, double reltuples);
+							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
 static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult **stats,
-							   double reltuples, bool estimated_count);
+							   double reltuples, bool estimated_count, LVRelStats *vacrelstats);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
@@ -337,13 +354,13 @@ static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult *
 										 int nindexes);
 static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVShared *lvshared, LVDeadTuples *dead_tuples,
-								  int nindexes);
+								  int nindexes, LVRelStats *vacrelstats);
 static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVRelStats *vacrelstats, LVParallelState *lps,
 								  int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-							 LVDeadTuples *dead_tuples);
+							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
 static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 									 LVRelStats *vacrelstats, LVParallelState *lps,
 									 int nindexes);
@@ -361,6 +378,10 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
 static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
+static void update_vacuum_error_cbarg(LVRelStats *errcbarg, int phase,
+									  BlockNumber blkno, char *indname,
+									  bool free_oldindname);
 
 
 /*
@@ -394,6 +415,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	double		new_live_tuples;
 	TransactionId new_frozen_xid;
 	MultiXactId new_min_multi;
+	ErrorContextCallback errcallback;
 
 	Assert(params != NULL);
 	Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
@@ -460,6 +482,10 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 
 	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
 
+	vacrelstats->relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats->relname = pstrdup(RelationGetRelationName(onerel));
+	vacrelstats->indname = NULL;
+	vacrelstats->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
 	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
 	vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
 	vacrelstats->num_index_scans = 0;
@@ -471,6 +497,12 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	vacrelstats->useindex = (nindexes > 0 &&
 							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
 
+	/* Setup error traceback support for ereport() */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	/* Do the vacuuming */
 	lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
 
@@ -499,6 +531,9 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	if (should_attempt_truncation(params, vacrelstats))
 		lazy_truncate_heap(onerel, vacrelstats);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/* Report that we are now doing final cleanup */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
@@ -699,7 +734,6 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	BlockNumber nblocks,
 				blkno;
 	HeapTupleData tuple;
-	char	   *relname;
 	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
 	TransactionId relminmxid = onerel->rd_rel->relminmxid;
 	BlockNumber empty_pages,
@@ -724,20 +758,20 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
 
 	pg_rusage_init(&ru0);
 
-	relname = RelationGetRelationName(onerel);
 	if (aggressive)
 		ereport(elevel,
 				(errmsg("aggressively vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
+						vacrelstats->relnamespace,
+						vacrelstats->relname)));
 	else
 		ereport(elevel,
 				(errmsg("vacuuming \"%s.%s\"",
-						get_namespace_name(RelationGetNamespace(onerel)),
-						relname)));
+						vacrelstats->relnamespace,
+						vacrelstats->relname)));
 
 	empty_pages = vacuumed_pages = 0;
 	next_fsm_block_to_vacuum = (BlockNumber) 0;
@@ -870,6 +904,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	else
 		skipping_blocks = false;
 
+	/* Setup error traceback support for ereport() */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -893,6 +933,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED,
 									 blkno);
 
+		update_vacuum_error_cbarg(vacrelstats, VACUUM_ERRCB_PHASE_SCAN_HEAP,
+								  blkno, NULL, false);
+
 		if (blkno == next_unskippable_block)
 		{
 			/* Time to advance next_unskippable_block */
@@ -1534,7 +1577,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
 		{
 			elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats->relname, blkno);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
 								VISIBILITYMAP_VALID_BITS);
 		}
@@ -1555,7 +1598,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		else if (PageIsAllVisible(page) && has_dead_tuples)
 		{
 			elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
-				 relname, blkno);
+				 vacrelstats->relname, blkno);
 			PageClearAllVisible(page);
 			MarkBufferDirty(buf);
 			visibilitymap_clear(onerel, blkno, vmbuffer,
@@ -1651,6 +1694,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (vacrelstats->useindex)
 		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
 	 * during parallel mode.
@@ -1744,7 +1790,7 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples);
+							  vacrelstats->old_live_tuples, vacrelstats);
 	}
 
 	/* Increase and report the number of index scans */
@@ -1772,11 +1818,17 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	int			npages;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
+	LVRelStats	olderrcbarg;
 
 	/* Report that we are now vacuuming the heap */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
 
+	/* Update error traceback information */
+	olderrcbarg = *vacrelstats;
+	update_vacuum_error_cbarg(vacrelstats, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+							  InvalidBlockNumber, NULL, false);
+
 	pg_rusage_init(&ru0);
 	npages = 0;
@@ -1791,6 +1843,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		vacrelstats->blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1822,6 +1875,13 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 					RelationGetRelationName(onerel),
 					tupindex, npages),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
+
+	/* Revert back to the old phase information for error traceback */
+	update_vacuum_error_cbarg(vacrelstats,
+							  olderrcbarg.phase,
+							  olderrcbarg.blkno,
+							  olderrcbarg.indname,
+							  true);
 }
 
@@ -1844,9 +1904,15 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 	int			uncnt = 0;
 	TransactionId visibility_cutoff_xid;
 	bool		all_frozen;
+	LVRelStats	olderrcbarg;
 
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
+	/* Update error traceback information */
+	olderrcbarg = *vacrelstats;
+	update_vacuum_error_cbarg(vacrelstats, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+							  blkno, NULL, false);
+
 	START_CRIT_SECTION();
 
 	for (; tupindex < dead_tuples->num_tuples; tupindex++)
@@ -1923,6 +1989,12 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 								 *vmbuffer, visibility_cutoff_xid, flags);
 	}
 
+	/* Revert back to the old phase information for error traceback */
+	update_vacuum_error_cbarg(vacrelstats,
+							  olderrcbarg.phase,
+							  olderrcbarg.blkno,
+							  olderrcbarg.indname,
+							  true);
 	return tupindex;
 }
 
@@ -2083,7 +2155,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	 * indexes in the case where no workers are launched.
 	 */
 	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes);
+						  vacrelstats->dead_tuples, nindexes, vacrelstats);
 
 	/* Wait for all vacuum workers to finish */
 	WaitForParallelWorkersToFinish(lps->pcxt);
@@ -2106,7 +2178,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 static void
 parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 					  LVShared *lvshared, LVDeadTuples *dead_tuples,
-					  int nindexes)
+					  int nindexes, LVRelStats *vacrelstats)
 {
 	/*
 	 * Increment the active worker count if we are able to launch any worker.
@@ -2140,7 +2212,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 
 		/* Do vacuum or cleanup of the index */
 		vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
-						 dead_tuples);
+						 dead_tuples, vacrelstats);
 	}
 
 	/*
@@ -2180,7 +2252,8 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
 			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
-							 shared_indstats, vacrelstats->dead_tuples);
+							 shared_indstats, vacrelstats->dead_tuples,
+							 vacrelstats);
 	}
 
 	/*
@@ -2200,7 +2273,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 static void
 vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 				 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-				 LVDeadTuples *dead_tuples)
+				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
 {
 	IndexBulkDeleteResult *bulkdelete_res = NULL;
 
@@ -2220,10 +2293,10 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	/* Do vacuum or cleanup of the index */
 	if (lvshared->for_cleanup)
 		lazy_cleanup_index(indrel, stats, lvshared->reltuples,
-						   lvshared->estimated_count);
+						   lvshared->estimated_count, vacrelstats);
 	else
 		lazy_vacuum_index(indrel, stats, dead_tuples,
-						  lvshared->reltuples);
+						  lvshared->reltuples, vacrelstats);
 
 	/*
 	 * Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2298,7 +2371,8 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_cleanup_index(Irel[idx], &stats[idx],
 							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+							   vacrelstats->tupcount_pages < vacrelstats->rel_pages,
+							   vacrelstats);
 	}
 }
 
@@ -2313,11 +2387,12 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-				  LVDeadTuples *dead_tuples, double reltuples)
+				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	LVRelStats	olderrcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -2329,6 +2404,14 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Update error traceback information */
+	olderrcbarg = *vacrelstats;
+	update_vacuum_error_cbarg(vacrelstats,
+							  VACUUM_ERRCB_PHASE_VACUUM_INDEX,
+							  InvalidBlockNumber,
+							  RelationGetRelationName(indrel),
+							  false);
+
 	/* Do bulk deletion */
 	*stats = index_bulk_delete(&ivinfo, *stats, lazy_tid_reaped,
 							   (void *) dead_tuples);
@@ -2343,6 +2426,13 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 					RelationGetRelationName(indrel),
 					dead_tuples->num_tuples),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
+
+	/* Revert back to the old phase information for error traceback */
+	update_vacuum_error_cbarg(vacrelstats,
+							  olderrcbarg.phase,
+							  olderrcbarg.blkno,
+							  olderrcbarg.indname,
+							  true);
 }
 
@@ -2354,11 +2444,12 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 static void
 lazy_cleanup_index(Relation indrel,
 				   IndexBulkDeleteResult **stats,
-				   double reltuples, bool estimated_count)
+				   double reltuples, bool estimated_count, LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	LVRelStats	olderrcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -2371,6 +2462,14 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Update error traceback information */
+	olderrcbarg = *vacrelstats;
+	update_vacuum_error_cbarg(vacrelstats,
+							  VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
+							  InvalidBlockNumber,
+							  RelationGetRelationName(indrel),
+							  false);
+
 	*stats = index_vacuum_cleanup(&ivinfo, *stats);
 
 	if (!(*stats))
@@ -2392,6 +2491,13 @@ lazy_cleanup_index(Relation indrel,
 					(*stats)->tuples_removed,
 					(*stats)->pages_deleted, (*stats)->pages_free,
 					pg_rusage_show(&ru0))));
+
+	/* Revert back to the old phase information for error traceback */
+	update_vacuum_error_cbarg(vacrelstats,
+							  olderrcbarg.phase,
+							  olderrcbarg.blkno,
+							  olderrcbarg.indname,
+							  true);
 }
 
@@ -2440,6 +2546,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 	BlockNumber old_rel_pages = vacrelstats->rel_pages;
 	BlockNumber new_rel_pages;
 	int			lock_retry;
+	LVRelStats	olderrcbarg;
 
 	/* Report that we are now truncating */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
@@ -2510,6 +2617,12 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 			return;
 		}
 
+		/* Update error traceback information */
+		olderrcbarg = *vacrelstats;
+		update_vacuum_error_cbarg(vacrelstats,
+								  VACUUM_ERRCB_PHASE_TRUNCATE, new_rel_pages, NULL,
+								  false);
+
 		/*
 		 * Scan backwards from the end to verify that the end pages actually
 		 * contain no tuples.  This is *necessary*, not optional, because
@@ -2517,6 +2630,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * were vacuuming.
 		 */
 		new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
+		vacrelstats->blkno = new_rel_pages;
 
 		if (new_rel_pages >= old_rel_pages)
 		{
@@ -2530,6 +2644,13 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 */
 		RelationTruncate(onerel, new_rel_pages);
 
+		/* Revert back to the old phase information for error traceback */
+		update_vacuum_error_cbarg(vacrelstats,
+								  olderrcbarg.phase,
+								  olderrcbarg.blkno,
+								  olderrcbarg.indname,
+								  true);
+
 		/*
 		 * We can release the exclusive lock as soon as we have truncated.
 		 * Other backends can't safely access the relation until they have
@@ -3320,6 +3441,8 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	int			nindexes;
 	char	   *sharedquery;
 	IndexBulkDeleteResult **stats;
+	LVRelStats	vacrelstats;
+	ErrorContextCallback errcallback;
 
 	lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
 										   false);
@@ -3369,10 +3492,97 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	if (lvshared->maintenance_work_mem_worker > 0)
 		maintenance_work_mem = lvshared->maintenance_work_mem_worker;
 
+	/*
+	 * Initialize vacrelstats for use as error callback arg by parallel
+	 * worker.
+	 */
+	vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
+	vacrelstats.relname = pstrdup(RelationGetRelationName(onerel));
+	vacrelstats.indname = NULL;
+	vacrelstats.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
+
+	/* Setup error traceback support for ereport() */
+	errcallback.callback = vacuum_error_callback;
+	errcallback.arg = &vacrelstats;
+	errcallback.previous = error_context_stack;
+	error_context_stack = &errcallback;
+
 	/* Process indexes to perform vacuum/cleanup */
-	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes);
+	parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
+						  &vacrelstats);
+
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
 
 	vac_close_indexes(nindexes, indrels, RowExclusiveLock);
 	table_close(onerel, ShareUpdateExclusiveLock);
 	pfree(stats);
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	LVRelStats *cbarg = arg;
+
+	switch (cbarg->phase)
+	{
+		case VACUUM_ERRCB_PHASE_SCAN_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while scanning block %u of relation \"%s.%s\"",
+						   cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case VACUUM_ERRCB_PHASE_VACUUM_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while vacuuming block %u of relation \"%s.%s\"",
+						   cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case VACUUM_ERRCB_PHASE_VACUUM_INDEX:
+			errcontext("while vacuuming index \"%s\" of relation \"%s.%s\"",
+					   cbarg->indname, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case VACUUM_ERRCB_PHASE_INDEX_CLEANUP:
+			errcontext("while cleaning up index \"%s\" of relation \"%s.%s\"",
+					   cbarg->indname, cbarg->relnamespace, cbarg->relname);
+			break;

+		case VACUUM_ERRCB_PHASE_TRUNCATE:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext("while truncating relation \"%s.%s\" to %u blocks",
+						   cbarg->relnamespace, cbarg->relname, cbarg->blkno);
+			break;
+
+		case VACUUM_ERRCB_PHASE_UNKNOWN:
+		default:
+			return;				/* do nothing; the cbarg may not be
+								 * initialized */
+	}
+}
+
+/*
+ * Update vacuum error callback for the current phase, block, and index.
+ *
+ * free_oldindname is true if the previous "indname" should be freed. It must be
+ * false if the caller has copied the old LVRelStats, to avoid keeping a
+ * pointer to a freed allocation. In which case, the caller should call again
+ * with free_oldindname as true to avoid a leak.
+ */
+static void
+update_vacuum_error_cbarg(LVRelStats *errcbarg, int phase, BlockNumber blkno,
+						  char *indname, bool free_oldindname)
+{
+	errcbarg->blkno = blkno;
+	errcbarg->phase = phase;
+
+	/* Free index name from any previous phase */
+	if (free_oldindname && errcbarg->indname)
+		pfree(errcbarg->indname);
+
+	/* For index phases, save the name of the current index for the callback */
+	errcbarg->indname = indname ? pstrdup(indname) : NULL;
+}
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index ca2d9ec8fb..518393344b 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2565,6 +2565,7 @@ UserMapping
 UserOpts
 VacAttrStats
 VacAttrStatsP
+VacErrCbPhase
 VacOptTernaryValue
 VacuumParams
 VacuumRelation
-- 
2.17.0
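To make the effect of 0001 concrete: an error raised during vacuum now carries a
CONTEXT line built from the errcontext strings above, for example (the relation,
index name and block number here are invented for illustration):

    CONTEXT:  while scanning block 2354 of relation "public.pgbench_accounts"
    CONTEXT:  while vacuuming index "pgbench_accounts_pkey" of relation "public.pgbench_accounts"

Only one such line is added per error, depending on the phase in which it was raised.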
>From 481a09267efa1e6426054b945fcb3a977609abcb Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 4 Mar 2020 12:28:50 -0600
Subject: [PATCH v32 2/3] Drop reltuples

---
 src/backend/access/heap/vacuumlazy.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index cbea791968..3b4c7fe66c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -331,10 +331,10 @@ static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
+							  LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
 static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult **stats,
-							   double reltuples, bool estimated_count, LVRelStats *vacrelstats);
+							   LVRelStats *vacrelstats);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
@@ -1790,7 +1790,7 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples, vacrelstats);
+							  vacrelstats);
 	}
 
 	/* Increase and report the number of index scans */
@@ -2292,11 +2292,10 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	/* Do vacuum or cleanup of the index */
 	if (lvshared->for_cleanup)
-		lazy_cleanup_index(indrel, stats, lvshared->reltuples,
-						   lvshared->estimated_count, vacrelstats);
+		lazy_cleanup_index(indrel, stats, vacrelstats);
 	else
 		lazy_vacuum_index(indrel, stats, dead_tuples,
-						  lvshared->reltuples, vacrelstats);
+						  vacrelstats);
 
 	/*
 	 * Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2370,8 +2369,6 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	{
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_cleanup_index(Irel[idx], &stats[idx],
-							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages,
 							   vacrelstats);
 	}
 }
@@ -2387,7 +2384,7 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
+				  LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
@@ -2401,7 +2398,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.report_progress = false;
 	ivinfo.estimated_count = true;
 	ivinfo.message_level = elevel;
-	ivinfo.num_heap_tuples = reltuples;
+	ivinfo.num_heap_tuples = vacrelstats->old_live_tuples;
 	ivinfo.strategy = vac_strategy;
 
 	/* Update error traceback information */
@@ -2444,7 +2441,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 static void
 lazy_cleanup_index(Relation indrel,
 				   IndexBulkDeleteResult **stats,
-				   double reltuples, bool estimated_count, LVRelStats *vacrelstats)
+				   LVRelStats *vacrelstats)
 {
 	IndexVacuumInfo ivinfo;
 	const char *msg;
@@ -2456,10 +2453,11 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.index = indrel;
 	ivinfo.analyze_only = false;
 	ivinfo.report_progress = false;
-	ivinfo.estimated_count = estimated_count;
+	ivinfo.estimated_count = (bool) (vacrelstats->tupcount_pages <
+									 vacrelstats->rel_pages);
 	ivinfo.message_level = elevel;
-	ivinfo.num_heap_tuples = reltuples;
+	ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
 	ivinfo.strategy = vac_strategy;
 
 	/* Update error traceback information */
-- 
2.17.0
>From 4d1be586b1ef33302528c4b9724d41f77da94687 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 26 Feb 2020 19:22:55 -0600
Subject: [PATCH v32 3/3] Avoid some calls to RelationGetRelationName

---
 src/backend/access/heap/vacuumlazy.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 3b4c7fe66c..652446dbf2 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -634,8 +634,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 			}
 			appendStringInfo(&buf, msgfmt,
 							 get_database_name(MyDatabaseId),
-							 get_namespace_name(RelationGetNamespace(onerel)),
-							 RelationGetRelationName(onerel),
+							 vacrelstats->relnamespace,
+							 vacrelstats->relname,
 							 vacrelstats->num_index_scans);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
 							 vacrelstats->pages_removed,
@@ -807,7 +807,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		if (params->nworkers > 0)
 			ereport(WARNING,
 					(errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
-							RelationGetRelationName(onerel))));
+							vacrelstats->relname)));
 	}
 	else
 		lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
@@ -1711,7 +1711,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (vacuumed_pages)
 		ereport(elevel,
 				(errmsg("\"%s\": removed %.0f row versions in %u pages",
-						RelationGetRelationName(onerel),
+						vacrelstats->relname,
 						tups_vacuumed, vacuumed_pages)));
 
 	/*
@@ -1740,7 +1740,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 	ereport(elevel,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
-					RelationGetRelationName(onerel),
+					vacrelstats->relname,
 					tups_vacuumed, num_tuples,
 					vacrelstats->scanned_pages, nblocks),
 			 errdetail_internal("%s", buf.data)));
@@ -1872,7 +1872,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 
 	ereport(elevel,
 			(errmsg("\"%s\": removed %d row versions in %d pages",
-					RelationGetRelationName(onerel),
+					vacrelstats->relname,
 					tupindex, npages),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 
@@ -2420,7 +2420,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 
 	ereport(elevel,
 			(errmsg(msg,
-					RelationGetRelationName(indrel),
+					vacrelstats->relname,
 					dead_tuples->num_tuples),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 
@@ -2589,7 +2589,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 			vacrelstats->lock_waiter_detected = true;
 			ereport(elevel,
 					(errmsg("\"%s\": stopping truncate due to conflicting lock request",
-							RelationGetRelationName(onerel))));
+							vacrelstats->relname)));
 			return;
 		}
@@ -2668,7 +2668,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 
 	ereport(elevel,
 			(errmsg("\"%s\": truncated %u to %u pages",
-					RelationGetRelationName(onerel),
+					vacrelstats->relname,
 					old_rel_pages, new_rel_pages),
 			 errdetail_internal("%s", pg_rusage_show(&ru0))));
 
@@ -2733,7 +2733,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 		{
 			ereport(elevel,
 					(errmsg("\"%s\": suspending truncate due to conflicting lock request",
-							RelationGetRelationName(onerel))));
+							vacrelstats->relname)));
 
 			vacrelstats->lock_waiter_detected = true;
 			return blkno;
-- 
2.17.0