Hi, On Tue, Jan 6, 2026 at 3:44 AM Daniil Davydov <[email protected]> wrote: > > On Tue, Jan 6, 2026 at 1:51 AM Masahiko Sawada <[email protected]> wrote: > > > > Are you still working on it? Or shall I draft this part on top of the > > 0001 patch? > > I thought about some "beautiful" approach, but for now I have > only one idea - force parallel a/v workers to get values for these > parameters from shmem (which obviously can be modified by the > leader a/v process). I'll post this patch in the near future. >
I am posting a draft version of the patch (see 0005) that allows the parallel leader to propagate changes of cost-based parameters to its parallel workers. It is a very rough fix, but it reflects my idea, which is to have some shared state from which parallel workers can get values for these parameters (and which only the leader process can modify, obviously). I have also added a test that shows that this idea works - the test ensures that parallel workers can change their parameters if they have been changed for the leader worker. Note that the work is still in progress - this logic currently works only for the vacuum_cost_delay and vacuum_cost_limit parameters. I think that we should agree on an idea first, and only then apply the logic to all appropriate parameters. What do you think? -- Best regards, Daniil Davydov
From a5f261dc7b4fe37aba8f24ef5241e2b1f2d85a36 Mon Sep 17 00:00:00 2001 From: Daniil Davidov <[email protected]> Date: Sun, 23 Nov 2025 01:03:24 +0700 Subject: [PATCH v18 1/5] Parallel autovacuum --- src/backend/access/common/reloptions.c | 11 ++ src/backend/commands/vacuumparallel.c | 42 ++++- src/backend/postmaster/autovacuum.c | 166 +++++++++++++++++- src/backend/utils/init/globals.c | 1 + src/backend/utils/misc/guc.c | 8 +- src/backend/utils/misc/guc_parameters.dat | 9 + src/backend/utils/misc/postgresql.conf.sample | 2 + src/bin/psql/tab-complete.in.c | 1 + src/include/miscadmin.h | 1 + src/include/postmaster/autovacuum.h | 4 + src/include/utils/rel.h | 7 + 11 files changed, 242 insertions(+), 10 deletions(-) diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 0b83f98ed5f..692ac46733e 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -222,6 +222,15 @@ static relopt_int intRelOpts[] = }, SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100 }, + { + { + "autovacuum_parallel_workers", + "Maximum number of parallel autovacuum workers that can be used for processing this table.", + RELOPT_KIND_HEAP, + ShareUpdateExclusiveLock + }, + -1, -1, 1024 + }, { { "autovacuum_vacuum_threshold", @@ -1881,6 +1890,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) {"fillfactor", RELOPT_TYPE_INT, offsetof(StdRdOptions, fillfactor)}, {"autovacuum_enabled", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, enabled)}, + {"autovacuum_parallel_workers", RELOPT_TYPE_INT, + offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, autovacuum_parallel_workers)}, {"autovacuum_vacuum_threshold", RELOPT_TYPE_INT, offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_threshold)}, {"autovacuum_vacuum_max_threshold", RELOPT_TYPE_INT, diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 
c3b3c9ea21a..6a3a00585f9 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -1,7 +1,9 @@ /*------------------------------------------------------------------------- * * vacuumparallel.c - * Support routines for parallel vacuum execution. + * Support routines for parallel vacuum and autovacuum execution. In the + * comments below, the word "vacuum" will refer to both vacuum and + * autovacuum. * * This file contains routines that are intended to support setting up, using, * and tearing down a ParallelVacuumState. @@ -34,6 +36,7 @@ #include "executor/instrument.h" #include "optimizer/paths.h" #include "pgstat.h" +#include "postmaster/autovacuum.h" #include "storage/bufmgr.h" #include "tcop/tcopprot.h" #include "utils/lsyscache.h" @@ -373,8 +376,9 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, shared->queryid = pgstat_get_my_query_id(); shared->maintenance_work_mem_worker = (nindexes_mwm > 0) ? - maintenance_work_mem / Min(parallel_workers, nindexes_mwm) : - maintenance_work_mem; + vac_work_mem / Min(parallel_workers, nindexes_mwm) : + vac_work_mem; + shared->dead_items_info.max_bytes = vac_work_mem * (size_t) 1024; /* Prepare DSA space for dead items */ @@ -553,12 +557,17 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, int nindexes_parallel_bulkdel = 0; int nindexes_parallel_cleanup = 0; int parallel_workers; + int max_workers; + + max_workers = AmAutoVacuumWorkerProcess() ? + autovacuum_max_parallel_workers : + max_parallel_maintenance_workers; /* * We don't allow performing parallel operation in standalone backend or * when parallelism is disabled. */ - if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0) + if (!IsUnderPostmaster || max_workers == 0) return 0; /* @@ -597,8 +606,8 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, parallel_workers = (nrequested > 0) ? 
Min(nrequested, nindexes_parallel) : nindexes_parallel; - /* Cap by max_parallel_maintenance_workers */ - parallel_workers = Min(parallel_workers, max_parallel_maintenance_workers); + /* Cap by GUC variable */ + parallel_workers = Min(parallel_workers, max_workers); return parallel_workers; } @@ -646,6 +655,13 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan */ nworkers = Min(nworkers, pvs->pcxt->nworkers); + /* + * Reserve workers in autovacuum global state. Note, that we may be given + * fewer workers than we requested. + */ + if (AmAutoVacuumWorkerProcess() && nworkers > 0) + AutoVacuumReserveParallelWorkers(&nworkers); + /* * Set index vacuum status and mark whether parallel vacuum worker can * process it. @@ -690,6 +706,16 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan LaunchParallelWorkers(pvs->pcxt); + if (AmAutoVacuumWorkerProcess() && + pvs->pcxt->nworkers_launched < nworkers) + { + /* + * Tell autovacuum that we could not launch all the previously + * reserved workers. 
+ */ + AutoVacuumReleaseParallelWorkers(nworkers - pvs->pcxt->nworkers_launched); + } + if (pvs->pcxt->nworkers_launched > 0) { /* @@ -738,6 +764,10 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan for (int i = 0; i < pvs->pcxt->nworkers_launched; i++) InstrAccumParallelQuery(&pvs->buffer_usage[i], &pvs->wal_usage[i]); + + /* Also release all previously reserved parallel autovacuum workers */ + if (AmAutoVacuumWorkerProcess() && pvs->pcxt->nworkers_launched > 0) + AutoVacuumReleaseParallelWorkers(pvs->pcxt->nworkers_launched); } /* diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 3e507d23cc9..bc11970bfee 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -151,6 +151,12 @@ int Log_autoanalyze_min_duration = 600000; static double av_storage_param_cost_delay = -1; static int av_storage_param_cost_limit = -1; +/* + * Variable to keep number of currently reserved parallel autovacuum workers. + * It is only relevant for parallel autovacuum leader process. + */ +static int av_nworkers_reserved = 0; + /* Flags set by signal handlers */ static volatile sig_atomic_t got_SIGUSR2 = false; @@ -285,6 +291,8 @@ typedef struct AutoVacuumWorkItem * av_workItems work item array * av_nworkersForBalance the number of autovacuum workers to use when * calculating the per worker cost limit + * av_freeParallelWorkers the number of free parallel autovacuum workers + * av_maxParallelWorkers the maximum number of parallel autovacuum workers * * This struct is protected by AutovacuumLock, except for av_signal and parts * of the worker list (see above). 
@@ -299,6 +307,8 @@ typedef struct WorkerInfo av_startingWorker; AutoVacuumWorkItem av_workItems[NUM_WORKITEMS]; pg_atomic_uint32 av_nworkersForBalance; + uint32 av_freeParallelWorkers; + uint32 av_maxParallelWorkers; } AutoVacuumShmemStruct; static AutoVacuumShmemStruct *AutoVacuumShmem; @@ -364,6 +374,8 @@ static void autovac_report_workitem(AutoVacuumWorkItem *workitem, static void avl_sigusr2_handler(SIGNAL_ARGS); static bool av_worker_available(void); static void check_av_worker_gucs(void); +static void adjust_free_parallel_workers(int prev_max_parallel_workers); +static void AutoVacuumReleaseAllParallelWorkers(void); @@ -763,6 +775,8 @@ ProcessAutoVacLauncherInterrupts(void) if (ConfigReloadPending) { int autovacuum_max_workers_prev = autovacuum_max_workers; + int autovacuum_max_parallel_workers_prev = + autovacuum_max_parallel_workers; ConfigReloadPending = false; ProcessConfigFile(PGC_SIGHUP); @@ -779,6 +793,15 @@ ProcessAutoVacLauncherInterrupts(void) if (autovacuum_max_workers_prev != autovacuum_max_workers) check_av_worker_gucs(); + /* + * If autovacuum_max_parallel_workers changed, we must take care of + * the correct value of available parallel autovacuum workers in + * shmem. + */ + if (autovacuum_max_parallel_workers_prev != + autovacuum_max_parallel_workers) + adjust_free_parallel_workers(autovacuum_max_parallel_workers_prev); + /* rebuild the list in case the naptime changed */ rebuild_database_list(InvalidOid); } @@ -1383,6 +1406,19 @@ avl_sigusr2_handler(SIGNAL_ARGS) * AUTOVACUUM WORKER CODE ********************************************************************/ +/* + * Make sure that all reserved workers are released, even if parallel + * autovacuum leader is finishing due to FATAL error. + */ +static void +autovacuum_worker_before_shmem_exit(int code, Datum arg) +{ + if (code != 0) + AutoVacuumReleaseAllParallelWorkers(); + + Assert(av_nworkers_reserved == 0); +} + /* * Main entry point for autovacuum worker processes. 
*/ @@ -1438,6 +1474,8 @@ AutoVacWorkerMain(const void *startup_data, size_t startup_data_len) /* Early initialization */ BaseInit(); + before_shmem_exit(autovacuum_worker_before_shmem_exit, 0); + /* * If an exception is encountered, processing resumes here. * @@ -2480,6 +2518,12 @@ do_autovacuum(void) } PG_CATCH(); { + /* + * Parallel autovacuum can reserve parallel workers. Make sure + * that all reserved workers are released. + */ + AutoVacuumReleaseAllParallelWorkers(); + /* * Abort the transaction, start a new one, and proceed with the * next table in our list. @@ -2880,8 +2924,12 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map, */ tab->at_params.index_cleanup = VACOPTVALUE_UNSPECIFIED; tab->at_params.truncate = VACOPTVALUE_UNSPECIFIED; - /* As of now, we don't support parallel vacuum for autovacuum */ - tab->at_params.nworkers = -1; + + /* Decide whether we need to process indexes of table in parallel. */ + tab->at_params.nworkers = avopts + ? avopts->autovacuum_parallel_workers + : -1; + tab->at_params.freeze_min_age = freeze_min_age; tab->at_params.freeze_table_age = freeze_table_age; tab->at_params.multixact_freeze_min_age = multixact_freeze_min_age; @@ -3358,6 +3406,85 @@ AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId, return result; } +/* + * In order to meet the 'autovacuum_max_parallel_workers' limit, leader + * autovacuum process must call this function during computing the parallel + * degree. + * + * 'nworkers' is the desired number of parallel workers to reserve. Function + * sets 'nworkers' to the number of parallel workers that actually can be + * launched and reserves these workers (if any) in global autovacuum state. + * + * NOTE: We will try to provide as many workers as requested, even if caller + * will occupy all available workers. + */ +void +AutoVacuumReserveParallelWorkers(int *nworkers) +{ + /* Only leader worker can call this function. 
*/ + Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker()); + + /* + * We can only reserve workers at the beginning of parallel index + * processing, so we must not have any reserved workers right now. + */ + Assert(av_nworkers_reserved == 0); + + LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); + + /* Provide as many workers as we can. */ + *nworkers= Min(AutoVacuumShmem->av_freeParallelWorkers, *nworkers); + AutoVacuumShmem->av_freeParallelWorkers -= *nworkers; + + /* Remember how many workers we have reserved. */ + av_nworkers_reserved = *nworkers; + + LWLockRelease(AutovacuumLock); +} + +/* + * Leader autovacuum process must call this function in order to update global + * autovacuum state, so other leaders will be able to use these parallel + * workers. + * + * 'nworkers' - how many workers caller wants to release. + */ +void +AutoVacuumReleaseParallelWorkers(int nworkers) +{ + /* Only leader worker can call this function. */ + Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker()); + + LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); + + /* + * If the maximum number of parallel workers was reduced during execution, + * we must cap available workers number by its new value. + */ + AutoVacuumShmem->av_freeParallelWorkers = + Min(AutoVacuumShmem->av_freeParallelWorkers + nworkers, + AutoVacuumShmem->av_maxParallelWorkers); + + /* Don't have to remember these workers anymore. */ + av_nworkers_reserved -= nworkers; + + LWLockRelease(AutovacuumLock); +} + +/* + * Same as above, but release *all* parallel workers, that were reserved by + * current leader autovacuum process. + */ +static void +AutoVacuumReleaseAllParallelWorkers(void) +{ + /* Only leader worker can call this function. */ + Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker()); + + if (av_nworkers_reserved > 0) + AutoVacuumReleaseParallelWorkers(av_nworkers_reserved); +} + /* * autovac_init * This is called at postmaster initialization. 
@@ -3418,6 +3545,10 @@ AutoVacuumShmemInit(void) Assert(!found); AutoVacuumShmem->av_launcherpid = 0; + AutoVacuumShmem->av_maxParallelWorkers = + Min(autovacuum_max_parallel_workers, max_worker_processes); + AutoVacuumShmem->av_freeParallelWorkers = + AutoVacuumShmem->av_maxParallelWorkers; dclist_init(&AutoVacuumShmem->av_freeWorkers); dlist_init(&AutoVacuumShmem->av_runningWorkers); AutoVacuumShmem->av_startingWorker = NULL; @@ -3499,3 +3630,34 @@ check_av_worker_gucs(void) errdetail("The server will only start up to \"autovacuum_worker_slots\" (%d) autovacuum workers at a given time.", autovacuum_worker_slots))); } + +/* + * Make sure that number of free parallel workers corresponds to the + * autovacuum_max_parallel_workers parameter (after it was changed). + */ +static void +adjust_free_parallel_workers(int prev_max_parallel_workers) +{ + LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); + + /* + * Cap the number of free workers by new parameter's value, if needed. + */ + AutoVacuumShmem->av_freeParallelWorkers = + Min(AutoVacuumShmem->av_freeParallelWorkers, + autovacuum_max_parallel_workers); + + if (autovacuum_max_parallel_workers > prev_max_parallel_workers) + { + /* + * If user wants to increase number of parallel autovacuum workers, we + * must increase number of free workers. 
+ */ + AutoVacuumShmem->av_freeParallelWorkers += + (autovacuum_max_parallel_workers - prev_max_parallel_workers); + } + + AutoVacuumShmem->av_maxParallelWorkers = autovacuum_max_parallel_workers; + + LWLockRelease(AutovacuumLock); +} diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c index 36ad708b360..24ddb276f0c 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -143,6 +143,7 @@ int NBuffers = 16384; int MaxConnections = 100; int max_worker_processes = 8; int max_parallel_workers = 8; +int autovacuum_max_parallel_workers = 0; int MaxBackends = 0; /* GUC parameters for vacuum */ diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index ae9d5f3fb70..c8a99a67767 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -3326,9 +3326,13 @@ set_config_with_handle(const char *name, config_handle *handle, * * Also allow normal setting if the GUC is marked GUC_ALLOW_IN_PARALLEL. * - * Other changes might need to affect other workers, so forbid them. + * Other changes might need to affect other workers, so forbid them. Note, + * that parallel autovacuum leader is an exception, because only + * cost-based delays need to be affected also to parallel vacuum workers, + * and we will handle it elsewhere if appropriate. 
*/ - if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE && + if (IsInParallelMode() && !AmAutoVacuumWorkerProcess() && changeVal && + action != GUC_ACTION_SAVE && (record->flags & GUC_ALLOW_IN_PARALLEL) == 0) { ereport(elevel, diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat index 7c60b125564..e933f5048f7 100644 --- a/src/backend/utils/misc/guc_parameters.dat +++ b/src/backend/utils/misc/guc_parameters.dat @@ -154,6 +154,15 @@ max => '2000000000', }, +{ name => 'autovacuum_max_parallel_workers', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM', + short_desc => 'Maximum number of parallel autovacuum workers, that can be taken from bgworkers pool.', + long_desc => 'This parameter is capped by "max_worker_processes" (not by "autovacuum_max_workers"!).', + variable => 'autovacuum_max_parallel_workers', + boot_val => '2', + min => '0', + max => 'MAX_BACKENDS', +}, + { name => 'autovacuum_max_workers', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM', short_desc => 'Sets the maximum number of simultaneously running autovacuum worker processes.', variable => 'autovacuum_max_workers', diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index dc9e2255f8a..86c67b790b0 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -691,6 +691,8 @@ #autovacuum_worker_slots = 16 # autovacuum worker slots to allocate # (change requires restart) #autovacuum_max_workers = 3 # max number of autovacuum subprocesses +#autovacuum_max_parallel_workers = 2 # disabled by default and limited by + # max_worker_processes #autovacuum_naptime = 1min # time between autovacuum runs #autovacuum_vacuum_threshold = 50 # min number of row updates before # vacuum diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c index 06edea98f06..2b8a4aab390 100644 --- 
a/src/bin/psql/tab-complete.in.c +++ b/src/bin/psql/tab-complete.in.c @@ -1423,6 +1423,7 @@ static const char *const table_storage_parameters[] = { "autovacuum_multixact_freeze_max_age", "autovacuum_multixact_freeze_min_age", "autovacuum_multixact_freeze_table_age", + "autovacuum_parallel_workers", "autovacuum_vacuum_cost_delay", "autovacuum_vacuum_cost_limit", "autovacuum_vacuum_insert_scale_factor", diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index db559b39c4d..ad6e19f426c 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -178,6 +178,7 @@ extern PGDLLIMPORT int MaxBackends; extern PGDLLIMPORT int MaxConnections; extern PGDLLIMPORT int max_worker_processes; extern PGDLLIMPORT int max_parallel_workers; +extern PGDLLIMPORT int autovacuum_max_parallel_workers; extern PGDLLIMPORT int commit_timestamp_buffers; extern PGDLLIMPORT int multixact_member_buffers; diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h index e43067d0260..4acadbc0610 100644 --- a/src/include/postmaster/autovacuum.h +++ b/src/include/postmaster/autovacuum.h @@ -65,6 +65,10 @@ pg_noreturn extern void AutoVacWorkerMain(const void *startup_data, size_t start extern bool AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId, BlockNumber blkno); +/* parallel autovacuum stuff */ +extern void AutoVacuumReserveParallelWorkers(int *nworkers); +extern void AutoVacuumReleaseParallelWorkers(int nworkers); + /* shared memory stuff */ extern Size AutoVacuumShmemSize(void); extern void AutoVacuumShmemInit(void); diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index d03ab247788..c1d882659f9 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -311,6 +311,13 @@ typedef struct ForeignKeyCacheInfo typedef struct AutoVacOpts { bool enabled; + + /* + * Max number of parallel autovacuum workers. If value is 0 then parallel + * degree will computed based on number of indexes. 
+ */ + int autovacuum_parallel_workers; + int vacuum_threshold; int vacuum_max_threshold; int vacuum_ins_threshold; -- 2.43.0
From bbcc4b92941325248254b074a2d1c94f244b6a6c Mon Sep 17 00:00:00 2001 From: Daniil Davidov <[email protected]> Date: Sun, 23 Nov 2025 02:32:44 +0700 Subject: [PATCH v18 4/5] Documentation for parallel autovacuum --- doc/src/sgml/config.sgml | 17 +++++++++++++++++ doc/src/sgml/maintenance.sgml | 12 ++++++++++++ doc/src/sgml/ref/create_table.sgml | 20 ++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 601aa3afb8e..36fcc72f325 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2847,6 +2847,7 @@ include_dir 'conf.d' <para> When changing this value, consider also adjusting <xref linkend="guc-max-parallel-workers"/>, + <xref linkend="guc-autovacuum-max-parallel-workers"/>, <xref linkend="guc-max-parallel-maintenance-workers"/>, and <xref linkend="guc-max-parallel-workers-per-gather"/>. </para> @@ -9282,6 +9283,22 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; </listitem> </varlistentry> + <varlistentry id="guc-autovacuum-max-parallel-workers" xreflabel="autovacuum_max_parallel_workers"> + <term><varname>autovacuum_max_parallel_workers</varname> (<type>integer</type>) + <indexterm> + <primary><varname>autovacuum_max_parallel_workers</varname></primary> + <secondary>configuration parameter</secondary> + </indexterm> + </term> + <listitem> + <para> + Sets the maximum number of parallel autovacuum workers that + can be used for parallel index vacuuming at one time. Is capped by + <xref linkend="guc-max-worker-processes"/>. The default is 2. + </para> + </listitem> + </varlistentry> + </variablelist> </sect2> diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index 7c958b06273..c9f9163c551 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -926,6 +926,18 @@ HINT: Execute a database-wide VACUUM in that database. autovacuum workers' activity. 
</para> + <para> + If an autovacuum worker process comes across a table with the enabled + <xref linkend="reloption-autovacuum-parallel-workers"/> storage parameter, + it will launch parallel workers in order to vacuum indexes of this table + in a parallel mode. Parallel workers are taken from the pool of processes + established by <xref linkend="guc-max-worker-processes"/>, limited by + <xref linkend="guc-max-parallel-workers"/>. + The total number of parallel autovacuum workers that can be active at one + time is limited by the <xref linkend="guc-autovacuum-max-parallel-workers"/> + configuration parameter. + </para> + <para> If several large tables all become eligible for vacuuming in a short amount of time, all autovacuum workers might become occupied with diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 77c5a763d45..3592c9acff9 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1717,6 +1717,26 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM </listitem> </varlistentry> + <varlistentry id="reloption-autovacuum-parallel-workers" xreflabel="autovacuum_parallel_workers"> + <term><literal>autovacuum_parallel_workers</literal> (<type>integer</type>) + <indexterm> + <primary><varname>autovacuum_parallel_workers</varname> storage parameter</primary> + </indexterm> + </term> + <listitem> + <para> + Sets the maximum number of parallel autovacuum workers that can process + indexes of this table. + The default value is -1, which means no parallel index vacuuming for + this table. If value is 0 then parallel degree will computed based on + number of indexes. + Note that the computed number of workers may not actually be available at + run time. If this occurs, the autovacuum will run with fewer workers + than expected. 
+ </para> + </listitem> + </varlistentry> + <varlistentry id="reloption-autovacuum-vacuum-threshold" xreflabel="autovacuum_vacuum_threshold"> <term><literal>autovacuum_vacuum_threshold</literal>, <literal>toast.autovacuum_vacuum_threshold</literal> (<type>integer</type>) <indexterm> -- 2.43.0
From 0aafa271ec90dbe494eea79fd484a4856023b3a8 Mon Sep 17 00:00:00 2001 From: Daniil Davidov <[email protected]> Date: Sun, 23 Nov 2025 01:07:47 +0700 Subject: [PATCH v18 2/5] Logging for parallel autovacuum --- src/backend/access/heap/vacuumlazy.c | 27 +++++++++++++++++++++++++-- src/backend/commands/vacuumparallel.c | 20 ++++++++++++++------ src/include/commands/vacuum.h | 16 ++++++++++++++-- src/tools/pgindent/typedefs.list | 1 + 4 files changed, 54 insertions(+), 10 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 2086a577199..35d2b07aa8a 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -349,6 +349,12 @@ typedef struct LVRelState int num_index_scans; int num_dead_items_resets; Size total_dead_items_bytes; + + /* + * Number of planned and actually launched parallel workers for all index + * scans, or NULL + */ + PVWorkersUsage *workers_usage; /* Counters that follow are only for scanned_pages */ int64 tuples_deleted; /* # deleted from table */ int64 tuples_frozen; /* # newly frozen */ @@ -711,6 +717,16 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, indnames = palloc_array(char *, vacrel->nindexes); for (int i = 0; i < vacrel->nindexes; i++) indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i])); + + /* + * Allocate space for workers usage statistics. Thus, we explicitly + * make clear that such statistics must be accumulated. For now, this + * is used only by autovacuum leader worker, because it must log it in + * the end of table processing. + */ + vacrel->workers_usage = AmAutoVacuumWorkerProcess() ? + (PVWorkersUsage *) palloc0(sizeof(PVWorkersUsage)) : + NULL; } /* @@ -1125,6 +1141,11 @@ heap_vacuum_rel(Relation rel, const VacuumParams params, orig_rel_pages == 0 ? 
100.0 : 100.0 * vacrel->lpdead_item_pages / orig_rel_pages, vacrel->lpdead_items); + if (vacrel->workers_usage) + appendStringInfo(&buf, + _("parallel index vacuum/cleanup : workers planned = %d, workers launched = %d\n"), + vacrel->workers_usage->nplanned, + vacrel->workers_usage->nlaunched); for (int i = 0; i < vacrel->nindexes; i++) { IndexBulkDeleteResult *istat = vacrel->indstats[i]; @@ -2700,7 +2721,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) { /* Outsource everything to parallel variant */ parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples, - vacrel->num_index_scans); + vacrel->num_index_scans, + vacrel->workers_usage); /* * Do a postcheck to consider applying wraparound failsafe now. Note @@ -3133,7 +3155,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel) /* Outsource everything to parallel variant */ parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples, vacrel->num_index_scans, - estimated_count); + estimated_count, + vacrel->workers_usage); } /* Reset the progress counters */ diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 6a3a00585f9..490f93959d1 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -227,7 +227,7 @@ struct ParallelVacuumState static int parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, bool *will_parallel_vacuum); static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum); + bool vacuum, PVWorkersUsage *wusage); static void parallel_vacuum_process_safe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_unsafe_indexes(ParallelVacuumState *pvs); static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel, @@ -502,7 +502,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs) */ void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans) 
+ int num_index_scans, PVWorkersUsage *wusage) { Assert(!IsParallelWorker()); @@ -513,7 +513,7 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = true; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, true); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, true, wusage); } /* @@ -521,7 +521,8 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup */ void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans, bool estimated_count) + int num_index_scans, bool estimated_count, + PVWorkersUsage *wusage) { Assert(!IsParallelWorker()); @@ -533,7 +534,7 @@ parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tup pvs->shared->reltuples = num_table_tuples; pvs->shared->estimated_count = estimated_count; - parallel_vacuum_process_all_indexes(pvs, num_index_scans, false); + parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wusage); } /* @@ -618,7 +619,7 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested, */ static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans, - bool vacuum) + bool vacuum, PVWorkersUsage *wusage) { int nworkers; PVIndVacStatus new_status; @@ -742,6 +743,13 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan "launched %d parallel vacuum workers for index cleanup (planned: %d)", pvs->pcxt->nworkers_launched), pvs->pcxt->nworkers_launched, nworkers))); + + /* Remember these values, if we asked to. 
*/ + if (wusage != NULL) + { + wusage->nlaunched += pvs->pcxt->nworkers_launched; + wusage->nplanned += nworkers; + } } /* Vacuum the indexes that can be processed by only leader process */ diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index e885a4b9c77..ec5d70aacdc 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -300,6 +300,16 @@ typedef struct VacDeadItemsInfo int64 num_items; /* current # of entries */ } VacDeadItemsInfo; +/* + * PVWorkersUsage stores information about total number of launched and planned + * workers during parallel vacuum. + */ +typedef struct PVWorkersUsage +{ + int nlaunched; + int nplanned; +} PVWorkersUsage; + /* GUC parameters */ extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */ extern PGDLLIMPORT int vacuum_freeze_min_age; @@ -394,11 +404,13 @@ extern TidStore *parallel_vacuum_get_dead_items(ParallelVacuumState *pvs, extern void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs); extern void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, - int num_index_scans); + int num_index_scans, + PVWorkersUsage *wusage); extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples, int num_index_scans, - bool estimated_count); + bool estimated_count, + PVWorkersUsage *wusage); extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc); /* in commands/analyze.c */ diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index b9e671fcda8..6e35c6aa493 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2397,6 +2397,7 @@ PullFilterOps PushFilter PushFilterOps PushFunction +PVWorkersUsage PyCFunction PyMethodDef PyModuleDef -- 2.43.0
From 29fb650ac54e2f3bbc8f920292662906345e29ac Mon Sep 17 00:00:00 2001 From: Daniil Davidov <[email protected]> Date: Sun, 23 Nov 2025 01:08:14 +0700 Subject: [PATCH v18 3/5] Tests for parallel autovacuum --- src/backend/commands/vacuumparallel.c | 8 + src/backend/postmaster/autovacuum.c | 14 + src/test/modules/Makefile | 1 + src/test/modules/meson.build | 1 + src/test/modules/test_autovacuum/.gitignore | 2 + src/test/modules/test_autovacuum/Makefile | 26 ++ src/test/modules/test_autovacuum/meson.build | 36 +++ .../modules/test_autovacuum/t/001_basic.pl | 170 ++++++++++++ .../test_autovacuum/test_autovacuum--1.0.sql | 34 +++ .../modules/test_autovacuum/test_autovacuum.c | 255 ++++++++++++++++++ .../test_autovacuum/test_autovacuum.control | 3 + 11 files changed, 550 insertions(+) create mode 100644 src/test/modules/test_autovacuum/.gitignore create mode 100644 src/test/modules/test_autovacuum/Makefile create mode 100644 src/test/modules/test_autovacuum/meson.build create mode 100644 src/test/modules/test_autovacuum/t/001_basic.pl create mode 100644 src/test/modules/test_autovacuum/test_autovacuum--1.0.sql create mode 100644 src/test/modules/test_autovacuum/test_autovacuum.c create mode 100644 src/test/modules/test_autovacuum/test_autovacuum.control diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 490f93959d1..c2f0a37eef2 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -39,6 +39,7 @@ #include "postmaster/autovacuum.h" #include "storage/bufmgr.h" #include "tcop/tcopprot.h" +#include "utils/injection_point.h" #include "utils/lsyscache.h" #include "utils/rel.h" @@ -752,6 +753,13 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan } } + /* + * To be able to exercise whether all reserved parallel workers are being + * released anyway, allow injection points to trigger a failure at this + * point. 
+ */ + INJECTION_POINT("autovacuum-trigger-leader-failure", NULL); + /* Vacuum the indexes that can be processed by only leader process */ parallel_vacuum_process_unsafe_indexes(pvs); diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index bc11970bfee..a27274bfb4d 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -3439,6 +3439,13 @@ AutoVacuumReserveParallelWorkers(int *nworkers) /* Remember how many workers we have reserved. */ av_nworkers_reserved = *nworkers; + /* + * Injection point to help exercising number of available parallel + * autovacuum workers. + */ + INJECTION_POINT("autovacuum-set-free-parallel-workers-num", + &AutoVacuumShmem->av_freeParallelWorkers); + LWLockRelease(AutovacuumLock); } @@ -3468,6 +3475,13 @@ AutoVacuumReleaseParallelWorkers(int nworkers) /* Don't have to remember these workers anymore. */ av_nworkers_reserved -= nworkers; + /* + * Injection point to help exercising number of available parallel + * autovacuum workers. 
+ */ + INJECTION_POINT("autovacuum-set-free-parallel-workers-num", + &AutoVacuumShmem->av_freeParallelWorkers); + LWLockRelease(AutovacuumLock); } diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile index 4c6d56d97d8..bfe365fa575 100644 --- a/src/test/modules/Makefile +++ b/src/test/modules/Makefile @@ -16,6 +16,7 @@ SUBDIRS = \ plsample \ spgist_name_ops \ test_aio \ + test_autovacuum \ test_binaryheap \ test_bitmapset \ test_bloomfilter \ diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build index 1b31c5b98d6..01a3e3ec044 100644 --- a/src/test/modules/meson.build +++ b/src/test/modules/meson.build @@ -16,6 +16,7 @@ subdir('plsample') subdir('spgist_name_ops') subdir('ssl_passphrase_callback') subdir('test_aio') +subdir('test_autovacuum') subdir('test_binaryheap') subdir('test_bitmapset') subdir('test_bloomfilter') diff --git a/src/test/modules/test_autovacuum/.gitignore b/src/test/modules/test_autovacuum/.gitignore new file mode 100644 index 00000000000..716e17f5a2a --- /dev/null +++ b/src/test/modules/test_autovacuum/.gitignore @@ -0,0 +1,2 @@ +# Generated subdirectories +/tmp_check/ diff --git a/src/test/modules/test_autovacuum/Makefile b/src/test/modules/test_autovacuum/Makefile new file mode 100644 index 00000000000..4cf7344b2ac --- /dev/null +++ b/src/test/modules/test_autovacuum/Makefile @@ -0,0 +1,26 @@ +# src/test/modules/test_autovacuum/Makefile + +PGFILEDESC = "test_autovacuum - test code for parallel autovacuum" + +MODULE_big = test_autovacuum +OBJS = \ + $(WIN32RES) \ + test_autovacuum.o + +EXTENSION = test_autovacuum +DATA = test_autovacuum--1.0.sql + +TAP_TESTS = 1 + +export enable_injection_points + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_autovacuum +top_builddir = ../../../.. 
+include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_autovacuum/meson.build b/src/test/modules/test_autovacuum/meson.build new file mode 100644 index 00000000000..3441e5e49cf --- /dev/null +++ b/src/test/modules/test_autovacuum/meson.build @@ -0,0 +1,36 @@ +# Copyright (c) 2024-2025, PostgreSQL Global Development Group + +test_autovacuum_sources = files( + 'test_autovacuum.c', +) + +if host_system == 'windows' + test_autovacuum_sources += rc_lib_gen.process(win32ver_rc, extra_args: [ + '--NAME', 'test_autovacuum', + '--FILEDESC', 'test_autovacuum - test code for parallel autovacuum',]) +endif + +test_autovacuum = shared_module('test_autovacuum', + test_autovacuum_sources, + kwargs: pg_test_mod_args, +) +test_install_libs += test_autovacuum + +test_install_data += files( + 'test_autovacuum.control', + 'test_autovacuum--1.0.sql', +) + +tests += { + 'name': 'test_autovacuum', + 'sd': meson.current_source_dir(), + 'bd': meson.current_build_dir(), + 'tap': { + 'env': { + 'enable_injection_points': get_option('injection_points') ? 
'yes' : 'no', + }, + 'tests': [ + 't/001_basic.pl', + ], + }, +} diff --git a/src/test/modules/test_autovacuum/t/001_basic.pl b/src/test/modules/test_autovacuum/t/001_basic.pl new file mode 100644 index 00000000000..8bf153d132c --- /dev/null +++ b/src/test/modules/test_autovacuum/t/001_basic.pl @@ -0,0 +1,170 @@ +use warnings FATAL => 'all'; +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More; + +if ($ENV{enable_injection_points} ne 'yes') +{ + plan skip_all => 'Injection points not supported by this build'; +} + +my $psql_out; + +my $node = PostgreSQL::Test::Cluster->new('node1'); +$node->init; + +# Configure postgres, so it can launch parallel autovacuum workers, log all +# information we are interested in and autovacuum works frequently +$node->append_conf('postgresql.conf', qq{ + max_worker_processes = 20 + max_parallel_workers = 20 + max_parallel_maintenance_workers = 20 + autovacuum_max_parallel_workers = 10 + log_min_messages = debug2 + log_autovacuum_min_duration = 0 + autovacuum_naptime = '1s' + min_parallel_index_scan_size = 0 + shared_preload_libraries=test_autovacuum +}); +$node->start; + +my $indexes_num = 4; +my $initial_rows_num = 10_000; +my $autovacuum_parallel_workers = 2; + +# Create table with specified number of b-tree indexes on it +$node->safe_psql('postgres', qq{ + CREATE TABLE test_autovac ( + id SERIAL PRIMARY KEY, + col_1 INTEGER, col_2 INTEGER, col_3 INTEGER, col_4 INTEGER + ) WITH (autovacuum_parallel_workers = $autovacuum_parallel_workers, + autovacuum_enabled = false); + + DO \$\$ + DECLARE + i INTEGER; + BEGIN + FOR i IN 1..$indexes_num LOOP + EXECUTE format('CREATE INDEX idx_col_\%s ON test_autovac (col_\%s);', i, i); + END LOOP; + END \$\$; +}); + +# Insert specified tuples num into the table +$node->safe_psql('postgres', qq{ + DO \$\$ + DECLARE + i INTEGER; + BEGIN + FOR i IN 1..$initial_rows_num LOOP + INSERT INTO test_autovac VALUES (i, i + 1, i + 2, i + 3); + END LOOP; + END \$\$; +}); + +# Now, create 
some dead tuples and refresh table statistics +$node->safe_psql('postgres', qq{ + UPDATE test_autovac SET col_1 = 0 WHERE (col_1 % 3) = 0; + ANALYZE test_autovac; +}); + +# Create all functions needed for testing +$node->safe_psql('postgres', qq{ + CREATE EXTENSION test_autovacuum; + SELECT inj_set_free_workers_attach(); + SELECT inj_leader_failure_attach(); +}); + +# Test 1 : +# Our table has enough indexes and appropriate reloptions, so autovacuum must +# be able to process it in parallel mode. Just check if it can. +# Also check whether all requested workers: +# 1) launched +# 2) correctly released + +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = true); +}); + +# Wait until the parallel autovacuum on table is completed. At the same time, +# we check that the required number of parallel workers has been started. +$node->wait_for_log(qr/parallel index vacuum\/cleanup : workers planned = 2, / . + qr/workers launched = 2/); + +$node->psql('postgres', + "SELECT get_parallel_autovacuum_free_workers();", + stdout => \$psql_out, +); +is($psql_out, 10, 'All parallel workers has been released by the leader'); + +# Disable autovacuum on table during preparation for the next test +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = false); +}); + +# Create more dead tuples +$node->safe_psql('postgres', qq{ + UPDATE test_autovac SET col_2 = 0 WHERE (col_2 % 3) = 0; + ANALYZE test_autovac; +}); + +# Test 2: +# We want parallel autovacuum workers to be released even if leader gets an +# error. At first, simulate situation, when leader exites due to an ERROR. 
+ +$node->safe_psql('postgres', qq( + SELECT trigger_leader_failure('ERROR'); +)); + +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = true); +}); + +$node->wait_for_log(qr/error, triggered by injection point/); + +$node->psql('postgres', + "SELECT get_parallel_autovacuum_free_workers();", + stdout => \$psql_out, +); +is($psql_out, 10, + 'All parallel workers has been released by the leader after ERROR'); + +# Disable autovacuum on table during preparation for the next test +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = false); +}); + +# Create more dead tuples +$node->safe_psql('postgres', qq{ + UPDATE test_autovac SET col_3 = 0 WHERE (col_3 % 3) = 0; + ANALYZE test_autovac; +}); + +# Test 3: +# Same as Test 2, but simulate situation, when leader exites due to FATAL. + +$node->safe_psql('postgres', qq( + SELECT trigger_leader_failure('FATAL'); +)); + +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = true); +}); + +$node->wait_for_log(qr/fatal, triggered by injection point/); + +$node->psql('postgres', + "SELECT get_parallel_autovacuum_free_workers();", + stdout => \$psql_out, +); +is($psql_out, 10, + 'All parallel workers has been released by the leader after FATAL'); + +# Cleanup +$node->safe_psql('postgres', qq{ + SELECT inj_set_free_workers_detach(); + SELECT inj_leader_failure_detach(); +}); + +$node->stop; +done_testing(); diff --git a/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql b/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql new file mode 100644 index 00000000000..017d5da85ea --- /dev/null +++ b/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql @@ -0,0 +1,34 @@ +/* src/test/modules/test_autovacuum/test_autovacuum--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_autovacuum" to load this file. 
\quit + +/* + * Functions for expecting or to interfere autovacuum state + */ +CREATE FUNCTION get_parallel_autovacuum_free_workers() +RETURNS INTEGER STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION trigger_leader_failure(failure_type text) +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +/* + * Injection point related functions + */ +CREATE FUNCTION inj_set_free_workers_attach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION inj_set_free_workers_detach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION inj_leader_failure_attach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION inj_leader_failure_detach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_autovacuum/test_autovacuum.c b/src/test/modules/test_autovacuum/test_autovacuum.c new file mode 100644 index 00000000000..7948f4858ae --- /dev/null +++ b/src/test/modules/test_autovacuum/test_autovacuum.c @@ -0,0 +1,255 @@ +/*------------------------------------------------------------------------- + * + * test_autovacuum.c + * Helpers to write tests for parallel autovacuum + * + * Copyright (c) 2020-2025, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_autovacuum/test_autovacuum.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "fmgr.h" +#include "miscadmin.h" +#include "postmaster/autovacuum.h" +#include "storage/shmem.h" +#include "storage/ipc.h" +#include "storage/lwlock.h" +#include "utils/builtins.h" +#include "utils/injection_point.h" + +PG_MODULE_MAGIC; + +typedef enum AVLeaderFaulureType +{ + FAIL_NONE, + FAIL_ERROR, + FAIL_FATAL, +} AVLeaderFaulureType; + +typedef struct InjPointState +{ + bool enabled_set_free_workers; + uint32 free_parallel_workers; + + bool enabled_leader_failure; + AVLeaderFaulureType ftype; +} InjPointState; + +static InjPointState * 
inj_point_state; + +/* Shared memory init callbacks */ +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static shmem_startup_hook_type prev_shmem_startup_hook = NULL; + +static void +test_autovacuum_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); + + RequestAddinShmemSpace(sizeof(InjPointState)); +} + +static void +test_autovacuum_shmem_startup(void) +{ + bool found; + + if (prev_shmem_startup_hook) + prev_shmem_startup_hook(); + + /* Create or attach to the shared memory state */ + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + inj_point_state = ShmemInitStruct("injection_points", + sizeof(InjPointState), + &found); + + if (!found) + { + /* First time through, initialize */ + inj_point_state->enabled_leader_failure = false; + inj_point_state->enabled_set_free_workers = false; + inj_point_state->ftype = FAIL_NONE; + + /* Keep it in sync with AutoVacuumShmemInit */ + inj_point_state->free_parallel_workers = + Min(autovacuum_max_parallel_workers, max_worker_processes); + + InjectionPointAttach("autovacuum-set-free-parallel-workers-num", + "test_autovacuum", + "inj_set_free_workers", + NULL, + 0); + + InjectionPointAttach("autovacuum-trigger-leader-failure", + "test_autovacuum", + "inj_trigger_leader_failure", + NULL, + 0); + } + + LWLockRelease(AddinShmemInitLock); +} + +void +_PG_init(void) +{ + if (!process_shared_preload_libraries_in_progress) + return; + + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = test_autovacuum_shmem_request; + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = test_autovacuum_shmem_startup; +} + +extern PGDLLEXPORT void inj_set_free_workers(const char *name, + const void *private_data, + void *arg); +extern PGDLLEXPORT void inj_trigger_leader_failure(const char *name, + const void *private_data, + void *arg); + +/* + * Set number of currently available parallel a/v workers. This value may + * change after reserving or releasing such workers. 
+ * + * Function called from parallel autovacuum leader. + */ +void +inj_set_free_workers(const char *name, const void *private_data, void *arg) +{ + ereport(LOG, + errmsg("set parallel workers injection point called"), + errhidestmt(true), errhidecontext(true)); + + if (inj_point_state->enabled_set_free_workers) + { + Assert(arg != NULL); + inj_point_state->free_parallel_workers = *(uint32 *) arg; + } +} + +/* + * Throw an ERROR or FATAL, if somebody requested it. + * + * Function called from parallel autovacuum leader. + */ +void +inj_trigger_leader_failure(const char *name, const void *private_data, + void *arg) +{ + int elevel; + char *elevel_str; + + ereport(LOG, + errmsg("trigger leader failure injection point called"), + errhidestmt(true), errhidecontext(true)); + + if (inj_point_state->ftype == FAIL_NONE || + !inj_point_state->enabled_leader_failure) + { + return; + } + + elevel = inj_point_state->ftype == FAIL_ERROR ? ERROR : FATAL; + elevel_str = elevel == ERROR ? "error" : "fatal"; + + ereport(elevel, errmsg("%s, triggered by injection point", elevel_str)); +} + +PG_FUNCTION_INFO_V1(get_parallel_autovacuum_free_workers); +Datum +get_parallel_autovacuum_free_workers(PG_FUNCTION_ARGS) +{ + uint32 nworkers; + +#ifndef USE_INJECTION_POINTS + elog(ERROR, "injection points not supported"); +#endif + + LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); + nworkers = inj_point_state->free_parallel_workers; + LWLockRelease(AutovacuumLock); + + PG_RETURN_UINT32(nworkers); +} + +PG_FUNCTION_INFO_V1(trigger_leader_failure); +Datum +trigger_leader_failure(PG_FUNCTION_ARGS) +{ + const char *failure_type = text_to_cstring(PG_GETARG_TEXT_PP(0)); + +#ifndef USE_INJECTION_POINTS + elog(ERROR, "injection points not supported"); +#endif + + if (strcmp(failure_type, "NONE") == 0) + inj_point_state->ftype = FAIL_NONE; + else if (strcmp(failure_type, "ERROR") == 0) + inj_point_state->ftype = FAIL_ERROR; + else if (strcmp(failure_type, "FATAL") == 0) + inj_point_state->ftype = 
FAIL_FATAL; + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid leader failure type : %s", failure_type))); + + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(inj_set_free_workers_attach); +Datum +inj_set_free_workers_attach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_set_free_workers = true; + inj_point_state->ftype = FAIL_NONE; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(inj_set_free_workers_detach); +Datum +inj_set_free_workers_detach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_set_free_workers = false; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(inj_leader_failure_attach); +Datum +inj_leader_failure_attach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_leader_failure = true; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(inj_leader_failure_detach); +Datum +inj_leader_failure_detach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_leader_failure = false; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} diff --git a/src/test/modules/test_autovacuum/test_autovacuum.control b/src/test/modules/test_autovacuum/test_autovacuum.control new file mode 100644 index 00000000000..1b7fad258f0 --- /dev/null +++ b/src/test/modules/test_autovacuum/test_autovacuum.control @@ -0,0 +1,3 @@ +comment = 'Test code for parallel autovacuum' +default_version = '1.0' +module_pathname = '$libdir/test_autovacuum' -- 2.43.0
From 14abdef918a73e465900f758204de19982fc4224 Mon Sep 17 00:00:00 2001 From: Daniil Davidov <[email protected]> Date: Wed, 7 Jan 2026 16:03:20 +0700 Subject: [PATCH v18 5/5] Cost-based parameters propagation for parallel autovacuum --- src/backend/commands/vacuum.c | 26 +++- src/backend/commands/vacuumparallel.c | 130 ++++++++++++++++++ src/include/commands/vacuum.h | 2 + src/test/modules/test_autovacuum/Makefile | 2 + .../modules/test_autovacuum/t/001_basic.pl | 83 +++++++++++ .../test_autovacuum/test_autovacuum--1.0.sql | 12 ++ .../modules/test_autovacuum/test_autovacuum.c | 75 ++++++++++ 7 files changed, 328 insertions(+), 2 deletions(-) diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index aa4fbec143f..4c40a36523a 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -2430,8 +2430,24 @@ vacuum_delay_point(bool is_analyze) /* Always check for interrupts */ CHECK_FOR_INTERRUPTS(); - if (InterruptPending || - (!VacuumCostActive && !ConfigReloadPending)) + if (InterruptPending) + return; + + if (!AmAutoVacuumWorkerProcess()) + { + /* + * If we are autovacuum parallel worker, check whether cost-based + * parameters had changed in leader worker. + * If so, vacuum_cost_delay and vacuum_cost_limit will be set to the + * values which leader worker is operating on. + * + * Do it before checking VacuumCostActive, because its value might be + * changed after leader's parameters consumption. + */ + parallel_vacuum_fix_cost_based_params(); + } + + if (!VacuumCostActive && !ConfigReloadPending) return; /* @@ -2445,6 +2461,12 @@ vacuum_delay_point(bool is_analyze) ConfigReloadPending = false; ProcessConfigFile(PGC_SIGHUP); VacuumUpdateCosts(); + + /* + * If we are parallel autovacuum leader and some of cost-based + * parameters had changed, let other parallel workers know. 
+ */ + parallel_vacuum_propagate_cost_based_params(); } /* diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index c2f0a37eef2..06ecffeec42 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -54,6 +54,22 @@ #define PARALLEL_VACUUM_KEY_WAL_USAGE 4 #define PARALLEL_VACUUM_KEY_INDEX_STATS 5 +/* + * Only autovacuum leader can reload config file. We use this structure in + * parallel autovacuum for keeping worker's parameters in sync with leader's + * parameters. + */ +typedef struct PVSharedCostParams +{ + slock_t spinlock; /* protects all fields below */ + + /* Copies of corresponding parameters from autovacuum leader process */ + double cost_delay; + int cost_limit; +} PVSharedCostParams; + +static PVSharedCostParams *pv_shared_cost_params = NULL; + /* * Shared information among parallel workers. So this is allocated in the DSM * segment. @@ -123,6 +139,18 @@ typedef struct PVShared /* Statistics of shared dead items */ VacDeadItemsInfo dead_items_info; + + /* + * If 'true' then we are running parallel autovacuum. Otherwise, we are + * running parallel maintenence VACUUM. + */ + bool am_parallel_autovacuum; + + /* + * Struct for syncing parameters between supportive parallel autovacuum + * workers with leader worker. 
+ */ + PVSharedCostParams cost_params; } PVShared; /* Status used during parallel index vacuum or cleanup */ @@ -396,6 +424,17 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, pg_atomic_init_u32(&(shared->active_nworkers), 0); pg_atomic_init_u32(&(shared->idx), 0); + shared->am_parallel_autovacuum = AmAutoVacuumWorkerProcess(); + + if (shared->am_parallel_autovacuum) + { + shared->cost_params.cost_delay = vacuum_cost_delay; + shared->cost_params.cost_limit = vacuum_cost_limit; + SpinLockInit(&shared->cost_params.spinlock); + + pv_shared_cost_params = &(shared->cost_params); + } + shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared); pvs->shared = shared; @@ -538,6 +577,53 @@ parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tup parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wusage); } +/* + * Function to be called from parallel autovacuum worker in order to sync + * some cost-based delay parameter with the leader worker. + */ +bool +parallel_vacuum_fix_cost_based_params(void) +{ + /* Check whether we are running parallel autovacuum */ + if (pv_shared_cost_params == NULL) + return false; + + Assert(IsParallelWorker() && !AmAutoVacuumWorkerProcess()); + + SpinLockAcquire(&pv_shared_cost_params->spinlock); + + vacuum_cost_delay = pv_shared_cost_params->cost_delay; + vacuum_cost_limit = pv_shared_cost_params->cost_limit; + + SpinLockRelease(&pv_shared_cost_params->spinlock); + + if (vacuum_cost_delay > 0 && !VacuumFailsafeActive) + VacuumCostActive = true; + + return true; +} + +/* + * Function to be called from parallel autovacuum leader in order to propagate + * some cost-based parameters to the supportive workers. 
+ */ +void +parallel_vacuum_propagate_cost_based_params(void) +{ + /* Check whether we are running parallel autovacuum */ + if (pv_shared_cost_params == NULL) + return; + + Assert(AmAutoVacuumWorkerProcess()); + + SpinLockAcquire(&pv_shared_cost_params->spinlock); + + pv_shared_cost_params->cost_delay = vacuum_cost_delay; + pv_shared_cost_params->cost_limit = vacuum_cost_limit; + + SpinLockRelease(&pv_shared_cost_params->spinlock); +} + /* * Compute the number of parallel worker processes to request. Both index * vacuum and index cleanup can be executed with parallel workers. @@ -763,12 +849,26 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan /* Vacuum the indexes that can be processed by only leader process */ parallel_vacuum_process_unsafe_indexes(pvs); + /* + * To be able to exercise whether leader parallel autovacuum worker can + * propagate cost-based params to parallel workers, wait here until + * configuration is changed... + */ + INJECTION_POINT("av-leader-before-reload-conf", NULL); + /* * Join as a parallel worker. The leader vacuums alone processes all * parallel-safe indexes in the case where no workers are launched. */ parallel_vacuum_process_safe_indexes(pvs); + /* + * ...and then wait until leader guaranteed to propagate new parameters + * values to the workers. I.e. tests are expecting, that during processing + * of parallel safe index we have called vacuum_delay_point, + */ + INJECTION_POINT("av-leader-after-reload-conf", NULL); + /* * Next, accumulate buffer and WAL usage. (This must wait for the workers * to finish, or we might get incomplete data.) 
@@ -1104,6 +1204,9 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) VacuumSharedCostBalance = &(shared->cost_balance); VacuumActiveNWorkers = &(shared->active_nworkers); + if (shared->am_parallel_autovacuum) + pv_shared_cost_params = &(shared->cost_params); + /* Set parallel vacuum state */ pvs.indrels = indrels; pvs.nindexes = nindexes; @@ -1131,6 +1234,33 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) /* Prepare to track buffer usage during parallel execution */ InstrStartParallelQuery(); +#ifdef USE_INJECTION_POINTS + if (shared->am_parallel_autovacuum) + { + Assert(VacuumActiveNWorkers != NULL); + + /* + * To be able to exercise whether leader parallel autovacuum worker can + * propagate cost-based params to parallel workers, wait here until + * configuration is changed and leader workers had updated shared state. + */ + INJECTION_POINT("av-worker-before-reload-conf", NULL); + + /* Simulate config reload during normal processing */ + pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1); + vacuum_delay_point(false); + pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1); + + /* + * Wait until worker guaranteed to consume new parameters values from + * the leader and save new value in injection point state. 
+ */ + INJECTION_POINT("autovacuum-set-cost-based-parameter", + &vacuum_cost_delay); + INJECTION_POINT("av-worker-after-reload-conf", NULL); + } +#endif + /* Process indexes to perform vacuum/cleanup */ parallel_vacuum_process_safe_indexes(&pvs); diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index ec5d70aacdc..73125439bed 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -411,6 +411,8 @@ extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, int num_index_scans, bool estimated_count, PVWorkersUsage *wusage); +extern bool parallel_vacuum_fix_cost_based_params(void); +extern void parallel_vacuum_propagate_cost_based_params(void); extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc); /* in commands/analyze.c */ diff --git a/src/test/modules/test_autovacuum/Makefile b/src/test/modules/test_autovacuum/Makefile index 4cf7344b2ac..32254c53a5d 100644 --- a/src/test/modules/test_autovacuum/Makefile +++ b/src/test/modules/test_autovacuum/Makefile @@ -12,6 +12,8 @@ DATA = test_autovacuum--1.0.sql TAP_TESTS = 1 +EXTRA_INSTALL = src/test/modules/injection_points + export enable_injection_points ifdef USE_PGXS diff --git a/src/test/modules/test_autovacuum/t/001_basic.pl b/src/test/modules/test_autovacuum/t/001_basic.pl index 8bf153d132c..eec0f41b6a6 100644 --- a/src/test/modules/test_autovacuum/t/001_basic.pl +++ b/src/test/modules/test_autovacuum/t/001_basic.pl @@ -28,6 +28,11 @@ $node->append_conf('postgresql.conf', qq{ }); $node->start; +if (!$node->check_extension('injection_points')) +{ + plan skip_all => 'Extension injection_points not installed'; +} + my $indexes_num = 4; my $initial_rows_num = 10_000; my $autovacuum_parallel_workers = 2; @@ -73,6 +78,9 @@ $node->safe_psql('postgres', qq{ CREATE EXTENSION test_autovacuum; SELECT inj_set_free_workers_attach(); SELECT inj_leader_failure_attach(); + SELECT inj_check_av_param_attach(); + + CREATE EXTENSION injection_points; }); # Test 
1 : @@ -166,5 +174,80 @@ $node->safe_psql('postgres', qq{ SELECT inj_leader_failure_detach(); }); +# Test 4: +# Check whether parallel autovacuum leader can propagate cost-based parameters +# to parallel workers. + +# Disable autovacuum on table during preparation for the next test +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = false); +}); + +# Create more dead tuples +$node->safe_psql('postgres', qq{ + UPDATE test_autovac SET col_3 = 0 WHERE (col_4 % 3) = 0; + ANALYZE test_autovac; +}); + +$node->safe_psql('postgres', qq{ + SELECT injection_points_attach('av-leader-before-reload-conf', 'wait'); + SELECT injection_points_attach('av-leader-after-reload-conf', 'wait'); + SELECT injection_points_attach('av-worker-before-reload-conf', 'wait'); + SELECT injection_points_attach('av-worker-after-reload-conf', 'wait'); +}); + +$node->safe_psql('postgres', qq{ + ALTER TABLE test_autovac SET (autovacuum_enabled = true); +}); + +# Wait until leader parallel worker get to the point before vacuum_delay_point +# and change cost-based config parameter. + +$node->wait_for_event('autovacuum worker', 'av-leader-before-reload-conf'); +$node->psql('postgres', qq{ + ALTER SYSTEM SET autovacuum_vacuum_cost_delay = 10; + SELECT pg_reload_conf(); +}); +$node->psql('postgres', qq{ + SELECT injection_points_wakeup('av-leader-before-reload-conf'); +}); + +# Wait until leader worker propagates new patameter's value to the other +# workers and let them to call vacuum_delay_point + +$node->wait_for_event('autovacuum worker', 'av-leader-after-reload-conf'); +$node->safe_psql('postgres', qq{ + SELECT injection_points_wakeup('av-leader-after-reload-conf'); + SELECT injection_points_wakeup('av-worker-before-reload-conf'); +}); + +# Check whether parallel worker has consume new parameter's value from the +# leader. +# Aactually, it can happen before worker gets to the injection point, but we +# want to make everything as deterministic as possible. 
+ +$node->wait_for_event('parallel worker', 'av-worker-after-reload-conf'); +$node->psql('postgres', + "SELECT get_parallel_autovacuum_worker_param_value('vacuum_cost_delay');", + stdout => \$psql_out, +); +is($psql_out, 10.0, 'Leader successfully propagated parameter value'); + +$node->safe_psql('postgres', qq{ + SELECT injection_points_wakeup('av-worker-after-reload-conf'); +}); + +# Cleanup +$node->safe_psql('postgres', qq{ + SELECT injection_points_detach('av-leader-before-reload-conf'); + SELECT injection_points_detach('av-leader-after-reload-conf'); + SELECT injection_points_detach('av-worker-before-reload-conf'); + SELECT injection_points_detach('av-worker-after-reload-conf'); + SELECT inj_check_av_param_detach(); + + DROP EXTENSION test_autovacuum; + DROP EXTENSION injection_points; +}); + $node->stop; done_testing(); diff --git a/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql b/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql index 017d5da85ea..cb0407952d7 100644 --- a/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql +++ b/src/test/modules/test_autovacuum/test_autovacuum--1.0.sql @@ -14,6 +14,10 @@ CREATE FUNCTION trigger_leader_failure(failure_type text) RETURNS VOID STRICT AS 'MODULE_PATHNAME' LANGUAGE C; +CREATE FUNCTION get_parallel_autovacuum_worker_param_value(param_name text) +RETURNS FLOAT8 STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + /* * Injection point related functions */ @@ -32,3 +36,11 @@ AS 'MODULE_PATHNAME' LANGUAGE C; CREATE FUNCTION inj_leader_failure_detach() RETURNS VOID STRICT AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION inj_check_av_param_attach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION inj_check_av_param_detach() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_autovacuum/test_autovacuum.c b/src/test/modules/test_autovacuum/test_autovacuum.c index 7948f4858ae..e96cfda7ae9 100644 --- 
a/src/test/modules/test_autovacuum/test_autovacuum.c +++ b/src/test/modules/test_autovacuum/test_autovacuum.c @@ -38,6 +38,9 @@ typedef struct InjPointState bool enabled_leader_failure; AVLeaderFaulureType ftype; + + bool enabled_check_av_param; + double vacuum_cost_delay; } InjPointState; static InjPointState * inj_point_state; @@ -92,6 +95,12 @@ test_autovacuum_shmem_startup(void) "inj_trigger_leader_failure", NULL, 0); + + InjectionPointAttach("autovacuum-set-cost-based-parameter", + "test_autovacuum", + "inj_set_av_parameter", + NULL, + 0); } LWLockRelease(AddinShmemInitLock); @@ -109,6 +118,9 @@ _PG_init(void) shmem_startup_hook = test_autovacuum_shmem_startup; } +extern PGDLLEXPORT void inj_set_av_parameter(const char *name, + const void *private_data, + void *arg); extern PGDLLEXPORT void inj_set_free_workers(const char *name, const void *private_data, void *arg); @@ -205,6 +217,45 @@ trigger_leader_failure(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Set current setting of "vacuum_cost_delay" parameter. + * + * Function is called from parallel autovacuum worker. 
+ */ +void +inj_set_av_parameter(const char *name, const void *private_data, void *arg) +{ + ereport(LOG, + errmsg("set autovacuum parameter injection point called"), + errhidestmt(true), errhidecontext(true)); + + if (inj_point_state->enabled_check_av_param) + { + Assert(arg != NULL); + inj_point_state->vacuum_cost_delay = *(double *) arg; + } +} + +PG_FUNCTION_INFO_V1(get_parallel_autovacuum_worker_param_value); +Datum +get_parallel_autovacuum_worker_param_value(PG_FUNCTION_ARGS) +{ + const char *param_name = text_to_cstring(PG_GETARG_TEXT_PP(0)); + double value = 0.0; + +#ifndef USE_INJECTION_POINTS + elog(ERROR, "injection points not supported"); +#endif + + if (strcmp(param_name, "vacuum_cost_delay") == 0) + value = inj_point_state->vacuum_cost_delay; + else + elog(ERROR, + "cannot retrieve parameter %s from injection point", param_name); + + PG_RETURN_FLOAT8((float8) value); +} + PG_FUNCTION_INFO_V1(inj_set_free_workers_attach); Datum inj_set_free_workers_attach(PG_FUNCTION_ARGS) @@ -253,3 +304,27 @@ inj_leader_failure_detach(PG_FUNCTION_ARGS) #endif PG_RETURN_VOID(); } + +PG_FUNCTION_INFO_V1(inj_check_av_param_attach); +Datum +inj_check_av_param_attach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_check_av_param = true; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} + +PG_FUNCTION_INFO_V1(inj_check_av_param_detach); +Datum +inj_check_av_param_detach(PG_FUNCTION_ARGS) +{ +#ifdef USE_INJECTION_POINTS + inj_point_state->enabled_check_av_param = false; +#else + elog(ERROR, "injection points not supported"); +#endif + PG_RETURN_VOID(); +} -- 2.43.0
