Hi, On Fri, Feb 20, 2026 at 05:26:37PM +0000, Bertrand Drouvot wrote: > Hi, > > On Fri, Feb 20, 2026 at 11:02:49AM -0500, Andres Freund wrote: > > Hi, > > > > How could a user benefit from that split? To me this is pointless number > > gathering that wastes resources and confuses users. > > I was thinking that could be useful to know the distribution between "long" > waits > (greater than the deadlock timeout) among all the waits. > > If the vast majority are long waits that may indicate that the application is > misbehaving (as opposed to a tiny percentage of long waits). > > I was also thinking to bring those stats per-backend (as a next step) and that > could also probably be more useful (distribution per host for example, thanks > to > joining with pg_stat_activity).
As it seems that I'm the only one thinking that this split could be useful, I'm removing it in the attached. We can still split later on if we have requests from the field. So, we're back to what we were discussing before the split. As in v7, 0003 is adding the new GUC. So that we can see what having a new GUC implies in ProcSleep() and we can just get rid of 0003 if we think the GUC is not worth the extra complexity (I don't have a strong opinion on it but tempted to think that the extra GUC is not worth it). Regards, -- Bertrand Drouvot PostgreSQL Contributors Team RDS Open Source Databases Amazon Web Services: https://aws.amazon.com
>From bae7e839d430dbea957dea316038b9bc52688170 Mon Sep 17 00:00:00 2001 From: Bertrand Drouvot <[email protected]> Date: Tue, 29 Jul 2025 08:36:35 +0000 Subject: [PATCH v8 1/3] Add lock statistics Adding a new stat kind PGSTAT_KIND_LOCK for the lock statistics. This new statistic kind is a fixed one because its key is the lock type so that we know its size is LOCKTAG_LAST_TYPE + 1. This statistic kind records the following counters: waits wait_time fastpath_exceeded The waits and wait_time counters are incremented if log_lock_waits is on and the session waited longer than deadlock_timeout to acquire the lock. fastpath_exceeded is incremented when the lock can not be acquired via fast path because the fast path slot limit was exceeded. No extra details are added (like the ones, i.e. relation oid, database oid, we can find in pg_locks). The goal is to give an idea of what the locking behaviour looks like. XXX: Bump stat file format --- src/backend/storage/lmgr/lock.c | 58 ++++---- src/backend/storage/lmgr/proc.c | 6 + src/backend/utils/activity/Makefile | 1 + src/backend/utils/activity/meson.build | 1 + src/backend/utils/activity/pgstat.c | 18 +++ src/backend/utils/activity/pgstat_lock.c | 160 +++++++++++++++++++++++ src/include/pgstat.h | 27 ++++ src/include/utils/pgstat_internal.h | 21 +++ src/include/utils/pgstat_kind.h | 5 +- src/tools/pgindent/typedefs.list | 4 + 10 files changed, 273 insertions(+), 28 deletions(-) 28.7% src/backend/storage/lmgr/ 55.2% src/backend/utils/activity/ 7.5% src/include/utils/ 7.6% src/include/ diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index d930c66cdbd..b2d597bfd1e 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -39,6 +39,7 @@ #include "access/xlogutils.h" #include "miscadmin.h" #include "pg_trace.h" +#include "pgstat.h" #include "storage/lmgr.h" #include "storage/proc.h" #include "storage/procarray.h" @@ -984,37 +985,42 @@ LockAcquireExtended(const LOCKTAG 
*locktag, * lock type on a relation we have already locked using the fast-path, but * for now we don't worry about that case either. */ - if (EligibleForRelationFastPath(locktag, lockmode) && - FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP) + if (EligibleForRelationFastPath(locktag, lockmode)) { - uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); - bool acquired; - - /* - * LWLockAcquire acts as a memory sequencing point, so it's safe to - * assume that any strong locker whose increment to - * FastPathStrongRelationLocks->counts becomes visible after we test - * it has yet to begin to transfer fast-path locks. - */ - LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); - if (FastPathStrongRelationLocks->count[fasthashcode] != 0) - acquired = false; - else - acquired = FastPathGrantRelationLock(locktag->locktag_field2, - lockmode); - LWLockRelease(&MyProc->fpInfoLock); - if (acquired) + if (FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < + FP_LOCK_SLOTS_PER_GROUP) { + uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); + bool acquired; + /* - * The locallock might contain stale pointers to some old shared - * objects; we MUST reset these to null before considering the - * lock to be acquired via fast-path. + * LWLockAcquire acts as a memory sequencing point, so it's safe + * to assume that any strong locker whose increment to + * FastPathStrongRelationLocks->counts becomes visible after we + * test it has yet to begin to transfer fast-path locks. 
*/ - locallock->lock = NULL; - locallock->proclock = NULL; - GrantLockLocal(locallock, owner); - return LOCKACQUIRE_OK; + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); + if (FastPathStrongRelationLocks->count[fasthashcode] != 0) + acquired = false; + else + acquired = FastPathGrantRelationLock(locktag->locktag_field2, + lockmode); + LWLockRelease(&MyProc->fpInfoLock); + if (acquired) + { + /* + * The locallock might contain stale pointers to some old + * shared objects; we MUST reset these to null before + * considering the lock to be acquired via fast-path. + */ + locallock->lock = NULL; + locallock->proclock = NULL; + GrantLockLocal(locallock, owner); + return LOCKACQUIRE_OK; + } } + else + pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type); } /* diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 771b006b522..e38c8820103 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -1611,9 +1611,15 @@ ProcSleep(LOCALLOCK *locallock) "Processes holding the lock: %s. 
Wait queue: %s.", lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); else if (myWaitStatus == PROC_WAIT_STATUS_OK) + { + /* Increment the lock statistics counters */ + pgstat_count_lock_waits(locallock->tag.lock.locktag_type, + msecs); + ereport(LOG, (errmsg("process %d acquired %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs))); + } else { Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR); diff --git a/src/backend/utils/activity/Makefile b/src/backend/utils/activity/Makefile index c37bfb350bb..ca3ef89bf59 100644 --- a/src/backend/utils/activity/Makefile +++ b/src/backend/utils/activity/Makefile @@ -26,6 +26,7 @@ OBJS = \ pgstat_database.o \ pgstat_function.o \ pgstat_io.o \ + pgstat_lock.o \ pgstat_relation.o \ pgstat_replslot.o \ pgstat_shmem.o \ diff --git a/src/backend/utils/activity/meson.build b/src/backend/utils/activity/meson.build index 53bd5a246ca..1aa7ece5290 100644 --- a/src/backend/utils/activity/meson.build +++ b/src/backend/utils/activity/meson.build @@ -11,6 +11,7 @@ backend_sources += files( 'pgstat_database.c', 'pgstat_function.c', 'pgstat_io.c', + 'pgstat_lock.c', 'pgstat_relation.c', 'pgstat_replslot.c', 'pgstat_shmem.c', diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index 11bb71cad5a..eb8ccbaa628 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -83,6 +83,7 @@ * - pgstat_database.c * - pgstat_function.c * - pgstat_io.c + * - pgstat_lock.c * - pgstat_relation.c * - pgstat_replslot.c * - pgstat_slru.c @@ -448,6 +449,23 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .snapshot_cb = pgstat_io_snapshot_cb, }, + [PGSTAT_KIND_LOCK] = { + .name = "lock", + + .fixed_amount = true, + .write_to_file = true, + + .snapshot_ctl_off = offsetof(PgStat_Snapshot, lock), + .shared_ctl_off = offsetof(PgStat_ShmemControl, lock), + .shared_data_off = offsetof(PgStatShared_Lock, stats), + .shared_data_len = 
sizeof(((PgStatShared_Lock *) 0)->stats), + + .flush_static_cb = pgstat_lock_flush_cb, + .init_shmem_cb = pgstat_lock_init_shmem_cb, + .reset_all_cb = pgstat_lock_reset_all_cb, + .snapshot_cb = pgstat_lock_snapshot_cb, + }, + [PGSTAT_KIND_SLRU] = { .name = "slru", diff --git a/src/backend/utils/activity/pgstat_lock.c b/src/backend/utils/activity/pgstat_lock.c new file mode 100644 index 00000000000..91a53a6013a --- /dev/null +++ b/src/backend/utils/activity/pgstat_lock.c @@ -0,0 +1,160 @@ +/* ------------------------------------------------------------------------- + * + * pgstat_lock.c + * Implementation of lock statistics. + * + * This file contains the implementation of lock statistics. It is kept separate + * from pgstat.c to enforce the line between the statistics access / storage + * implementation and the details about individual types of statistics. + * + * Copyright (c) 2021-2025, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/utils/activity/pgstat_lock.c + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "utils/pgstat_internal.h" + +static PgStat_PendingLock PendingLockStats; +static bool have_lockstats = false; + +/* + * Simpler wrapper of pgstat_lock_flush_cb() + */ +void +pgstat_lock_flush(bool nowait) +{ + (void) pgstat_lock_flush_cb(nowait); +} + +/* + * Flush out locally pending lock statistics + * + * If no stats have been recorded, this function returns false. + * + * If nowait is true, this function returns true if the lock could not be + * acquired. Otherwise, return false. 
+ */ +bool +pgstat_lock_flush_cb(bool nowait) +{ + LWLock *lcktype_lock; + PgStat_LockEntry *lck_shstats; + bool lock_not_acquired = false; + + if (!have_lockstats) + return false; + + for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++) + { + lcktype_lock = &pgStatLocal.shmem->lock.locks[i]; + lck_shstats = + &pgStatLocal.shmem->lock.stats.stats[i]; + + if (!nowait) + LWLockAcquire(lcktype_lock, LW_EXCLUSIVE); + else if (!LWLockConditionalAcquire(lcktype_lock, LW_EXCLUSIVE)) + { + lock_not_acquired = true; + continue; + } + +#define LOCKSTAT_ACC(fld) \ + (lck_shstats->fld += PendingLockStats.stats[i].fld) + LOCKSTAT_ACC(waits); + LOCKSTAT_ACC(wait_time); + LOCKSTAT_ACC(fastpath_exceeded); +#undef LOCKSTAT_ACC + + LWLockRelease(lcktype_lock); + } + + memset(&PendingLockStats, 0, sizeof(PendingLockStats)); + + have_lockstats = false; + + return lock_not_acquired; +} + + +void +pgstat_lock_init_shmem_cb(void *stats) +{ + PgStatShared_Lock *stat_shmem = (PgStatShared_Lock *) stats; + + for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++) + LWLockInitialize(&stat_shmem->locks[i], LWTRANCHE_PGSTATS_DATA); +} + +void +pgstat_lock_reset_all_cb(TimestampTz ts) +{ + for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++) + { + LWLock *lcktype_lock = &pgStatLocal.shmem->lock.locks[i]; + PgStat_LockEntry *lck_shstats = &pgStatLocal.shmem->lock.stats.stats[i]; + + LWLockAcquire(lcktype_lock, LW_EXCLUSIVE); + + /* + * Use the lock in the first lock type PgStat_LockEntry to protect the + * reset timestamp as well. 
+ */ + if (i == 0) + pgStatLocal.shmem->lock.stats.stat_reset_timestamp = ts; + + memset(lck_shstats, 0, sizeof(*lck_shstats)); + LWLockRelease(lcktype_lock); + } +} + +void +pgstat_lock_snapshot_cb(void) +{ + for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++) + { + LWLock *lcktype_lock = &pgStatLocal.shmem->lock.locks[i]; + PgStat_LockEntry *lck_shstats = &pgStatLocal.shmem->lock.stats.stats[i]; + PgStat_LockEntry *lck_snap = &pgStatLocal.snapshot.lock.stats[i]; + + LWLockAcquire(lcktype_lock, LW_SHARED); + + /* + * Use the lock in the first lock type PgStat_LockEntry to protect the + * reset timestamp as well. + */ + if (i == 0) + pgStatLocal.snapshot.lock.stat_reset_timestamp = + pgStatLocal.shmem->lock.stats.stat_reset_timestamp; + + /* using struct assignment due to better type safety */ + *lck_snap = *lck_shstats; + LWLockRelease(lcktype_lock); + } +} + +#define PGSTAT_COUNT_LOCK_FUNC(stat) \ +void \ +CppConcat(pgstat_count_lock_,stat)(uint8 locktag_type) \ +{ \ + Assert(locktag_type <= LOCKTAG_LAST_TYPE); \ + PendingLockStats.stats[locktag_type].stat++; \ + have_lockstats = true; \ + pgstat_report_fixed = true; \ +} + +/* pgstat_count_lock_fastpath_exceeded */ +PGSTAT_COUNT_LOCK_FUNC(fastpath_exceeded) + +void +pgstat_count_lock_waits(uint8 locktag_type, long msecs) +{ + Assert(locktag_type <= LOCKTAG_LAST_TYPE); + PendingLockStats.stats[locktag_type].waits++; + PendingLockStats.stats[locktag_type].wait_time += (PgStat_Counter) msecs; + have_lockstats = true; + pgstat_report_fixed = true; +} diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 0e9d2b4c623..f563132bf7b 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -15,6 +15,7 @@ #include "portability/instr_time.h" #include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */ #include "replication/conflict.h" +#include "storage/lock.h" #include "utils/backend_progress.h" /* for backward compatibility */ /* IWYU pragma: export */ #include "utils/backend_status.h" /* for backward compatibility */ 
/* IWYU pragma: export */ #include "utils/pgstat_kind.h" @@ -346,6 +347,24 @@ typedef struct PgStat_IO PgStat_BktypeIO stats[BACKEND_NUM_TYPES]; } PgStat_IO; +typedef struct PgStat_LockEntry +{ + PgStat_Counter waits; + PgStat_Counter wait_time; /* time in milliseconds */ + PgStat_Counter fastpath_exceeded; +} PgStat_LockEntry; + +typedef struct PgStat_PendingLock +{ + PgStat_LockEntry stats[LOCKTAG_LAST_TYPE + 1]; +} PgStat_PendingLock; + +typedef struct PgStat_Lock +{ + TimestampTz stat_reset_timestamp; + PgStat_LockEntry stats[LOCKTAG_LAST_TYPE + 1]; +} PgStat_Lock; + typedef struct PgStat_StatDBEntry { PgStat_Counter xact_commit; @@ -618,6 +637,14 @@ extern bool pgstat_tracks_io_op(BackendType bktype, IOObject io_object, IOContext io_context, IOOp io_op); +/* + * Functions in pgstat_lock.c + */ + +extern void pgstat_lock_flush(bool nowait); +extern void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type); +extern void pgstat_count_lock_waits(uint8 locktag_type, long msecs); + /* * Functions in pgstat_database.c */ diff --git a/src/include/utils/pgstat_internal.h b/src/include/utils/pgstat_internal.h index 9b8fbae00ed..97704421a92 100644 --- a/src/include/utils/pgstat_internal.h +++ b/src/include/utils/pgstat_internal.h @@ -464,6 +464,16 @@ typedef struct PgStatShared_IO PgStat_IO stats; } PgStatShared_IO; +typedef struct PgStatShared_Lock +{ + /* + * locks[i] protects stats.stats[i]. locks[0] also protects + * stats.stat_reset_timestamp. 
+ */ + LWLock locks[LOCKTAG_LAST_TYPE + 1]; + PgStat_Lock stats; +} PgStatShared_Lock; + typedef struct PgStatShared_SLRU { /* lock protects ->stats */ @@ -570,6 +580,7 @@ typedef struct PgStat_ShmemControl PgStatShared_BgWriter bgwriter; PgStatShared_Checkpointer checkpointer; PgStatShared_IO io; + PgStatShared_Lock lock; PgStatShared_SLRU slru; PgStatShared_Wal wal; @@ -602,6 +613,8 @@ typedef struct PgStat_Snapshot PgStat_IO io; + PgStat_Lock lock; + PgStat_SLRUStats slru[SLRU_NUM_ELEMENTS]; PgStat_WalStats wal; @@ -752,6 +765,14 @@ extern void pgstat_io_init_shmem_cb(void *stats); extern void pgstat_io_reset_all_cb(TimestampTz ts); extern void pgstat_io_snapshot_cb(void); +/* + * Functions in pgstat_lock.c + */ + +extern bool pgstat_lock_flush_cb(bool nowait); +extern void pgstat_lock_init_shmem_cb(void *stats); +extern void pgstat_lock_reset_all_cb(TimestampTz ts); +extern void pgstat_lock_snapshot_cb(void); /* * Functions in pgstat_relation.c diff --git a/src/include/utils/pgstat_kind.h b/src/include/utils/pgstat_kind.h index c30b6235623..2d78a029683 100644 --- a/src/include/utils/pgstat_kind.h +++ b/src/include/utils/pgstat_kind.h @@ -36,8 +36,9 @@ #define PGSTAT_KIND_BGWRITER 8 #define PGSTAT_KIND_CHECKPOINTER 9 #define PGSTAT_KIND_IO 10 -#define PGSTAT_KIND_SLRU 11 -#define PGSTAT_KIND_WAL 12 +#define PGSTAT_KIND_LOCK 11 +#define PGSTAT_KIND_SLRU 12 +#define PGSTAT_KIND_WAL 13 #define PGSTAT_KIND_BUILTIN_MIN PGSTAT_KIND_DATABASE #define PGSTAT_KIND_BUILTIN_MAX PGSTAT_KIND_WAL diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 1a89ef94bec..b4de923667f 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2256,6 +2256,7 @@ PgStatShared_Database PgStatShared_Function PgStatShared_HashEntry PgStatShared_IO +PgStatShared_Lock PgStatShared_Relation PgStatShared_ReplSlot PgStatShared_SLRU @@ -2278,8 +2279,11 @@ PgStat_HashKey PgStat_IO PgStat_KindInfo PgStat_LocalState +PgStat_Lock 
+PgStat_LockEntry PgStat_PendingDroppedStatsItem PgStat_PendingIO +PgStat_PendingLock PgStat_SLRUStats PgStat_ShmemControl PgStat_Snapshot -- 2.34.1
>From e71db2d88480f5a1a300c8c73b4428bfe5d5d877 Mon Sep 17 00:00:00 2001 From: Bertrand Drouvot <[email protected]> Date: Thu, 31 Jul 2025 09:35:31 +0000 Subject: [PATCH v8 2/3] Add the pg_stat_lock view This new view reports lock statistics. Note that it does not omit combinations which do not make sense (as pg_locks does). Also, wait_time is reported as bigint since the deadlock_timeout default value is 1s. This commit also adds documentation and a few tests. XXX: Bump catversion --- doc/src/sgml/monitoring.sgml | 113 ++++++++++++++ src/backend/catalog/system_views.sql | 9 ++ src/backend/utils/activity/pgstat_lock.c | 8 + src/backend/utils/adt/pgstatfuncs.c | 39 +++++ src/include/catalog/pg_proc.dat | 9 ++ src/include/pgstat.h | 1 + src/test/isolation/expected/stats.out | 189 +++++++++++++++++++++++ src/test/isolation/expected/stats_1.out | 189 +++++++++++++++++++++++ src/test/isolation/specs/stats.spec | 95 ++++++++++++ src/test/regress/expected/rules.out | 6 + src/test/regress/expected/stats.out | 48 ++++++ src/test/regress/sql/stats.sql | 36 +++++ 12 files changed, 742 insertions(+) 15.9% doc/src/sgml/ 5.0% src/backend/utils/adt/ 54.1% src/test/isolation/expected/ 11.3% src/test/isolation/specs/ 5.7% src/test/regress/expected/ 4.0% src/test/regress/sql/ 3.6% src/ diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index b77d189a500..3a196bc305c 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -493,6 +493,15 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser </entry> </row> + <row> + <entry><structname>pg_stat_lock</structname><indexterm><primary>pg_stat_lock</primary></indexterm></entry> + <entry> + One row for each lock type, containing cluster-wide lock statistics. + See <link linkend="monitoring-pg-stat-lock-view"> + <structname>pg_stat_lock</structname></link> for details. 
+ </entry> + </row> + <row> <entry><structname>pg_stat_replication_slots</structname><indexterm><primary>pg_stat_replication_slots</primary></indexterm></entry> <entry>One row per replication slot, showing statistics about the @@ -3124,6 +3133,104 @@ description | Waiting for a newly initialized WAL file to reach durable storage </sect2> + + <sect2 id="monitoring-pg-stat-lock-view"> + <title><structname>pg_stat_lock</structname></title> + + <indexterm> + <primary>pg_stat_lock</primary> + </indexterm> + + <para> + The <structname>pg_stat_lock</structname> view will contain one row for each + lock type, showing cluster-wide lock statistics. + </para> + + <table id="pg-stat-lock-view" xreflabel="pg_stat_lock"> + <title><structname>pg_stat_lock</structname> View</title> + <tgroup cols="1"> + <thead> + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + Column Type + </para> + <para> + Description + </para> + </entry> + </row> + </thead> + <tbody> + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + <structfield>locktype</structfield> <type>text</type> + </para> + <para> + Type of the lockable object. See <link linkend="view-pg-locks"> + <structname>pg_locks</structname></link> for details. + </para> + </entry> + </row> + + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + <structfield>waits</structfield> <type>bigint</type> + </para> + <para> + Number of times a lock of this type had to wait because of a + conflicting lock. Only incremented when <xref linkend="guc-log-lock-waits"/> + is enabled and the lock was successfully acquired after waiting longer + than <xref linkend="guc-deadlock-timeout"/>. + </para> + </entry> + </row> + + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + <structfield>wait_time</structfield> <type>bigint</type> + </para> + <para> + Total time spent waiting for locks of this type, in milliseconds. 
+ Only incremented when <xref linkend="guc-log-lock-waits"/> is enabled and + the lock was successfully acquired after waiting longer than + <xref linkend="guc-deadlock-timeout"/>. + </para> + </entry> + </row> + + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + <structfield>fastpath_exceeded</structfield> <type>bigint</type> + </para> + <para> + Number of times a lock of this type could not be acquired via fast path + because the fast path slot limit was exceeded. You may want to increase + <xref linkend="guc-max-locks-per-transaction"/> if you feel this counter + is too high. + </para> + </entry> + </row> + + <row> + <entry role="catalog_table_entry"> + <para role="column_definition"> + <structfield>stats_reset</structfield> <type>timestamp with time zone</type> + </para> + <para> + Time at which these statistics were last reset. + </para> + </entry> + </row> + </tbody> + </tgroup> + </table> + </sect2> + <sect2 id="monitoring-pg-stat-bgwriter-view"> <title><structname>pg_stat_bgwriter</structname></title> @@ -5195,6 +5302,12 @@ description | Waiting for a newly initialized WAL file to reach durable storage <structname>pg_stat_io</structname> view. </para> </listitem> + <listitem> + <para> + <literal>lock</literal>: Reset all the counters shown in the + <structname>pg_stat_lock</structname> view. 
+ </para> + </listitem> <listitem> <para> <literal>recovery_prefetch</literal>: Reset all the counters shown in diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 1ea8f1faa9e..22efeb713e4 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -985,6 +985,15 @@ CREATE VIEW pg_stat_slru AS s.stats_reset FROM pg_stat_get_slru() s; +CREATE VIEW pg_stat_lock AS + SELECT + l.locktype, + l.waits, + l.wait_time, + l.fastpath_exceeded, + l.stats_reset + FROM pg_stat_get_lock() l; + CREATE VIEW pg_stat_wal_receiver AS SELECT s.pid, diff --git a/src/backend/utils/activity/pgstat_lock.c b/src/backend/utils/activity/pgstat_lock.c index 91a53a6013a..954fa51c3e0 100644 --- a/src/backend/utils/activity/pgstat_lock.c +++ b/src/backend/utils/activity/pgstat_lock.c @@ -21,6 +21,14 @@ static PgStat_PendingLock PendingLockStats; static bool have_lockstats = false; +PgStat_Lock * +pgstat_fetch_stat_lock(void) +{ + pgstat_snapshot_fixed(PGSTAT_KIND_LOCK); + + return &pgStatLocal.snapshot.lock; +} + /* * Simpler wrapper of pgstat_lock_flush_cb() */ diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index b1df96e7b0b..27ca2d8f07c 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -1737,6 +1737,42 @@ pg_stat_get_wal(PG_FUNCTION_ARGS) wal_stats->stat_reset_timestamp)); } +Datum +pg_stat_get_lock(PG_FUNCTION_ARGS) +{ +#define PG_STAT_LOCK_COLS 5 + ReturnSetInfo *rsinfo; + PgStat_Lock *lock_stats; + + InitMaterializedSRF(fcinfo, 0); + rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + + lock_stats = pgstat_fetch_stat_lock(); + + for (int lcktype = 0; lcktype <= LOCKTAG_LAST_TYPE; lcktype++) + { + const char *locktypename; + Datum values[PG_STAT_LOCK_COLS] = {0}; + bool nulls[PG_STAT_LOCK_COLS] = {0}; + PgStat_LockEntry *lck_stats = &lock_stats->stats[lcktype]; + int i = 0; + + locktypename = LockTagTypeNames[lcktype]; + + values[i++] 
= CStringGetTextDatum(locktypename); + values[i++] = Int64GetDatum(lck_stats->waits); + values[i++] = Int64GetDatum(lck_stats->wait_time); + values[i++] = Int64GetDatum(lck_stats->fastpath_exceeded); + values[i] = TimestampTzGetDatum(lock_stats->stat_reset_timestamp); + + Assert(i + 1 == PG_STAT_LOCK_COLS); + + tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls); + } + + return (Datum) 0; +} + /* * Returns statistics of SLRU caches. */ @@ -1921,6 +1957,7 @@ pg_stat_reset_shared(PG_FUNCTION_ARGS) pgstat_reset_of_kind(PGSTAT_KIND_BGWRITER); pgstat_reset_of_kind(PGSTAT_KIND_CHECKPOINTER); pgstat_reset_of_kind(PGSTAT_KIND_IO); + pgstat_reset_of_kind(PGSTAT_KIND_LOCK); XLogPrefetchResetStats(); pgstat_reset_of_kind(PGSTAT_KIND_SLRU); pgstat_reset_of_kind(PGSTAT_KIND_WAL); @@ -1938,6 +1975,8 @@ pg_stat_reset_shared(PG_FUNCTION_ARGS) pgstat_reset_of_kind(PGSTAT_KIND_CHECKPOINTER); else if (strcmp(target, "io") == 0) pgstat_reset_of_kind(PGSTAT_KIND_IO); + else if (strcmp(target, "lock") == 0) + pgstat_reset_of_kind(PGSTAT_KIND_LOCK); else if (strcmp(target, "recovery_prefetch") == 0) XLogPrefetchResetStats(); else if (strcmp(target, "slru") == 0) diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index dac40992cbc..daeb45704c6 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -6027,6 +6027,15 @@ proargnames => '{backend_type,object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}', prosrc => 'pg_stat_get_io' }, +{ oid => '9375', descr => 'statistics: per lock type statistics', + proname => 'pg_stat_get_lock', prorows => '10', proretset => 't', + provolatile => 'v', proparallel => 'r', prorettype => 'record', + proargtypes => '', + proallargtypes => '{text,int8,int8,int8,timestamptz}', + proargmodes => '{o,o,o,o,o}', + proargnames => 
'{locktype,waits,wait_time,fastpath_exceeded,stats_reset}', + prosrc => 'pg_stat_get_lock' }, + { oid => '6386', descr => 'statistics: backend IO statistics', proname => 'pg_stat_get_backend_io', prorows => '5', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', diff --git a/src/include/pgstat.h b/src/include/pgstat.h index f563132bf7b..1c261e2c7a8 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -644,6 +644,7 @@ extern bool pgstat_tracks_io_op(BackendType bktype, IOObject io_object, extern void pgstat_lock_flush(bool nowait); extern void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type); extern void pgstat_count_lock_waits(uint8 locktag_type, long msecs); +extern PgStat_Lock *pgstat_fetch_stat_lock(void); /* * Functions in pgstat_database.c diff --git a/src/test/isolation/expected/stats.out b/src/test/isolation/expected/stats.out index cfad309ccf3..3cae3052e40 100644 --- a/src/test/isolation/expected/stats.out +++ b/src/test/isolation/expected/stats.out @@ -3751,3 +3751,192 @@ test_stat_func| 1|t |t (1 row) step s1_commit: COMMIT; + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_begin: BEGIN; +step s1_lock_relation: LOCK TABLE test_stat_tab; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step 
s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_lock_relation: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_relation: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; +?column?|?column? +--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_table_insert: INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1); +step s1_begin: BEGIN; +step s1_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_table_update_k1: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_transactionid: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'transactionid'; +?column?|?column? 
+--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_lock_advisory_lock: SELECT pg_advisory_lock(1); +pg_advisory_lock +---------------- + +(1 row) + +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_lock_advisory_lock: SELECT pg_advisory_lock(1); <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_lock_advisory_unlock: SELECT pg_advisory_unlock(1); +pg_advisory_unlock +------------------ +t +(1 row) + +step s2_lock_advisory_lock: <... completed> +pg_advisory_lock +---------------- + +(1 row) + +step s2_lock_advisory_unlock: SELECT pg_advisory_unlock(1); +pg_advisory_unlock +------------------ +t +(1 row) + +step s2_commit: COMMIT; +step s2_report_stat_lock_advisory: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'advisory'; +?column?|?column? 
+--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_unset_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_unset_log_lock_waits: SET log_lock_waits = off; +step s1_begin: BEGIN; +step s1_lock_relation: LOCK TABLE test_stat_tab; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_lock_relation: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_relation: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; +?column?|?column? 
+--------+-------- +f |f +(1 row) + diff --git a/src/test/isolation/expected/stats_1.out b/src/test/isolation/expected/stats_1.out index e1d937784cb..ea4fd97a9a5 100644 --- a/src/test/isolation/expected/stats_1.out +++ b/src/test/isolation/expected/stats_1.out @@ -3775,3 +3775,192 @@ test_stat_func| 1|t |t (1 row) step s1_commit: COMMIT; + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_begin: BEGIN; +step s1_lock_relation: LOCK TABLE test_stat_tab; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_lock_relation: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_relation: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; +?column?|?column? 
+--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_table_insert: INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1); +step s1_begin: BEGIN; +step s1_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_table_update_k1: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_transactionid: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'transactionid'; +?column?|?column? 
+--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_set_log_lock_waits: SET log_lock_waits = on; +step s1_lock_advisory_lock: SELECT pg_advisory_lock(1); +pg_advisory_lock +---------------- + +(1 row) + +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_lock_advisory_lock: SELECT pg_advisory_lock(1); <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_lock_advisory_unlock: SELECT pg_advisory_unlock(1); +pg_advisory_unlock +------------------ +t +(1 row) + +step s2_lock_advisory_lock: <... completed> +pg_advisory_lock +---------------- + +(1 row) + +step s2_lock_advisory_unlock: SELECT pg_advisory_unlock(1); +pg_advisory_unlock +------------------ +t +(1 row) + +step s2_commit: COMMIT; +step s2_report_stat_lock_advisory: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'advisory'; +?column?|?column? 
+--------+-------- +t |t +(1 row) + + +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_unset_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock'); +pg_stat_reset_shared +-------------------- + +(1 row) + +step s1_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; +step s2_unset_log_lock_waits: SET log_lock_waits = off; +step s1_begin: BEGIN; +step s1_lock_relation: LOCK TABLE test_stat_tab; +step s2_begin: BEGIN; +step s2_ff: SELECT pg_stat_force_next_flush(); +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...> +step s1_sleep: SELECT pg_sleep(0.5); +pg_sleep +-------- + +(1 row) + +step s1_commit: COMMIT; +step s2_lock_relation: <... completed> +step s2_commit: COMMIT; +step s2_report_stat_lock_relation: SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; +?column?|?column? 
+--------+-------- +f |f +(1 row) + diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec index da16710da0f..42be68c545f 100644 --- a/src/test/isolation/specs/stats.spec +++ b/src/test/isolation/specs/stats.spec @@ -130,6 +130,14 @@ step s1_slru_check_stats { WHERE before.stat = 'blks_zeroed'; } +# Lock stats steps +step s1_set_deadlock_timeout { SET deadlock_timeout = '10ms'; } +step s1_set_log_lock_waits { SET log_lock_waits = on; } +step s1_reset_stat_lock { SELECT pg_stat_reset_shared('lock'); } +step s1_sleep { SELECT pg_sleep(0.5); } +step s1_lock_relation { LOCK TABLE test_stat_tab; } +step s1_lock_advisory_lock { SELECT pg_advisory_lock(1); } +step s1_lock_advisory_unlock { SELECT pg_advisory_unlock(1); } session s2 setup { SET stats_fetch_consistency = 'none'; } @@ -164,6 +172,16 @@ step s2_big_notify { SELECT pg_notify('stats_test_use', repeat(i::text, current_setting('block_size')::int / 2)) FROM generate_series(1, 3) g(i); } +# Lock stats steps +step s2_set_deadlock_timeout { SET deadlock_timeout = '10ms'; } +step s2_set_log_lock_waits { SET log_lock_waits = on; } +step s2_unset_log_lock_waits { SET log_lock_waits = off; } +step s2_report_stat_lock_relation { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; } +step s2_report_stat_lock_transactionid { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'transactionid'; } +step s2_report_stat_lock_advisory { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'advisory'; } +step s2_lock_relation { LOCK TABLE test_stat_tab; } +step s2_lock_advisory_lock { SELECT pg_advisory_lock(1); } +step s2_lock_advisory_unlock { SELECT pg_advisory_unlock(1); } ###################### # Function stats tests @@ -765,3 +783,80 @@ permutation s1_clear_snapshot s1_func_stats s1_commit + +###################### +# Lock stats tests +###################### + +# relation lock + +permutation + s1_set_deadlock_timeout + 
s1_reset_stat_lock + s1_set_log_lock_waits + s2_set_deadlock_timeout + s2_set_log_lock_waits + s1_begin + s1_lock_relation + s2_begin + s2_ff + s2_lock_relation + s1_sleep + s1_commit + s2_commit + s2_report_stat_lock_relation + +# transaction lock + +permutation + s1_set_deadlock_timeout + s1_reset_stat_lock + s1_set_log_lock_waits + s2_set_deadlock_timeout + s2_set_log_lock_waits + s1_table_insert + s1_begin + s1_table_update_k1 + s2_begin + s2_ff + s2_table_update_k1 + s1_sleep + s1_commit + s2_commit + s2_report_stat_lock_transactionid + +# advisory lock + +permutation + s1_set_deadlock_timeout + s1_reset_stat_lock + s1_set_log_lock_waits + s2_set_deadlock_timeout + s2_set_log_lock_waits + s1_lock_advisory_lock + s2_begin + s2_ff + s2_lock_advisory_lock + s1_sleep + s1_lock_advisory_unlock + s2_lock_advisory_unlock + s2_commit + s2_report_stat_lock_advisory + +# Ensure log_lock_waits behaves correctly + +permutation + s1_set_deadlock_timeout + s1_reset_stat_lock + s1_set_log_lock_waits + s2_set_deadlock_timeout + s2_unset_log_lock_waits + s1_begin + s1_lock_relation + s2_begin + s2_ff + s2_lock_relation + s1_sleep + s1_commit + s2_commit + s2_report_stat_lock_relation diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 78a37d9fc8f..c013fbebb88 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1951,6 +1951,12 @@ pg_stat_io| SELECT backend_type, fsync_time, stats_reset FROM pg_stat_get_io() b(backend_type, object, context, reads, read_bytes, read_time, writes, write_bytes, write_time, writebacks, writeback_time, extends, extend_bytes, extend_time, hits, evictions, reuses, fsyncs, fsync_time, stats_reset); +pg_stat_lock| SELECT locktype, + waits, + wait_time, + fastpath_exceeded, + stats_reset + FROM pg_stat_get_lock() l(locktype, waits, wait_time, fastpath_exceeded, stats_reset); pg_stat_progress_analyze| SELECT s.pid, s.datid, d.datname, diff --git 
a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out index cd00f35bf7a..93de87747d4 100644 --- a/src/test/regress/expected/stats.out +++ b/src/test/regress/expected/stats.out @@ -1910,4 +1910,52 @@ SELECT * FROM check_estimated_rows('SELECT * FROM table_fillfactor'); (1 row) DROP TABLE table_fillfactor; +-- Test fastpath_exceeded stat +CREATE TABLE part_test (id int) PARTITION BY RANGE (id); +SELECT pg_stat_reset_shared('lock'); + pg_stat_reset_shared +---------------------- + +(1 row) + +-- Create partitions (exceeds number of slots) +DO $$ +DECLARE + max_locks int; +BEGIN + SELECT setting::int INTO max_locks + FROM pg_settings + WHERE name = 'max_locks_per_transaction'; + + FOR i IN 1..(max_locks + 10) LOOP + EXECUTE format( + 'CREATE TABLE part_test_%s PARTITION OF part_test + FOR VALUES FROM (%s) TO (%s)', + i, (i-1)*1000, i*1000 + ); + END LOOP; +END; +$$; +SELECT fastpath_exceeded AS fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation' \gset +-- Needs a lock on each partition +SELECT count(*) FROM part_test; + count +------- + 0 +(1 row) + +-- Ensure pending stats are flushed +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT fastpath_exceeded > :fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation'; + ?column? 
+---------- + t +(1 row) + +DROP TABLE part_test; -- End of Stats Test diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql index 8768e0f27fd..db9948eedb8 100644 --- a/src/test/regress/sql/stats.sql +++ b/src/test/regress/sql/stats.sql @@ -944,4 +944,40 @@ SELECT * FROM check_estimated_rows('SELECT * FROM table_fillfactor'); DROP TABLE table_fillfactor; +-- Test fastpath_exceeded stat +CREATE TABLE part_test (id int) PARTITION BY RANGE (id); + +SELECT pg_stat_reset_shared('lock'); + +-- Create partitions (exceeds number of slots) +DO $$ +DECLARE + max_locks int; +BEGIN + SELECT setting::int INTO max_locks + FROM pg_settings + WHERE name = 'max_locks_per_transaction'; + + FOR i IN 1..(max_locks + 10) LOOP + EXECUTE format( + 'CREATE TABLE part_test_%s PARTITION OF part_test + FOR VALUES FROM (%s) TO (%s)', + i, (i-1)*1000, i*1000 + ); + END LOOP; +END; +$$; + +SELECT fastpath_exceeded AS fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation' \gset + +-- Needs a lock on each partition +SELECT count(*) FROM part_test; + +-- Ensure pending stats are flushed +SELECT pg_stat_force_next_flush(); + +SELECT fastpath_exceeded > :fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation'; + +DROP TABLE part_test; + -- End of Stats Test -- 2.34.1
>From 1a7dfa836fa8c3c67b6423c587ebba2edf823cdf Mon Sep 17 00:00:00 2001 From: Bertrand Drouvot <[email protected]> Date: Fri, 20 Feb 2026 06:13:28 +0000 Subject: [PATCH v8 3/3] Introduce a new track_lock_timing GUC A new GUC (track_lock_timing) is added and defaults to on. If on, waits and wait_time counters are incremented if the session waited longer than deadlock_timeout to acquire the lock. It's on by default, as this is the same idea as 2aac62be8cb. --- doc/src/sgml/config.sgml | 19 +++ doc/src/sgml/monitoring.sgml | 4 +- src/backend/storage/lmgr/proc.c | 160 +++++++++--------- src/backend/utils/misc/guc_parameters.dat | 6 + src/backend/utils/misc/postgresql.conf.sample | 1 + src/include/pgstat.h | 1 + src/test/isolation/expected/stats.out | 24 +-- src/test/isolation/expected/stats_1.out | 24 +-- src/test/isolation/specs/stats.spec | 24 +-- 9 files changed, 149 insertions(+), 114 deletions(-) 7.6% doc/src/sgml/ 47.7% src/backend/storage/lmgr/ 36.3% src/test/isolation/expected/ 6.1% src/test/isolation/specs/ diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index f670e2d4c31..1d3acb697da 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -8842,6 +8842,25 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; </listitem> </varlistentry> + <varlistentry id="guc-track-lock-timing" xreflabel="track_lock_timing"> + <term><varname>track_lock_timing</varname> (<type>boolean</type>) + <indexterm> + <primary><varname>track_lock_timing</varname> configuration parameter</primary> + </indexterm> + </term> + <listitem> + <para> + Enables timing of lock waits. This parameter is on by default, as it tracks + only the timings for successful acquisitions that waited longer than + <xref linkend="guc-deadlock-timeout"/>. Lock timing information is + displayed in the <link linkend="monitoring-pg-stat-lock-view"> + <structname>pg_stat_lock</structname></link> view. 
+ Only superusers and users with the appropriate <literal>SET</literal> + privilege can change this setting. + </para> + </listitem> + </varlistentry> + <varlistentry id="guc-track-wal-io-timing" xreflabel="track_wal_io_timing"> <term><varname>track_wal_io_timing</varname> (<type>boolean</type>) <indexterm> diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 3a196bc305c..75c3e036860 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -3181,7 +3181,7 @@ description | Waiting for a newly initialized WAL file to reach durable storage </para> <para> Number of times a lock of this type had to wait because of a - conflicting lock. Only incremented when <xref linkend="guc-log-lock-waits"/> + conflicting lock. Only incremented when <xref linkend="guc-track-lock-timing"/> is enabled and the lock was successfully acquired after waiting longer than <xref linkend="guc-deadlock-timeout"/>. </para> @@ -3195,7 +3195,7 @@ description | Waiting for a newly initialized WAL file to reach durable storage </para> <para> Total time spent waiting for locks of this type, in milliseconds. - Only incremented when <xref linkend="guc-log-lock-waits"/> is enabled and + Only incremented when <xref linkend="guc-track-lock-timing"/> is enabled and the lock was successfully acquired after waiting longer than <xref linkend="guc-deadlock-timeout"/>. 
</para> diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index e38c8820103..7681f87f078 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -62,6 +62,7 @@ int IdleInTransactionSessionTimeout = 0; int TransactionTimeout = 0; int IdleSessionTimeout = 0; bool log_lock_waits = true; +bool track_lock_timing = true; /* Pointer to this process's PGPROC struct, if any */ PGPROC *MyProc = NULL; @@ -1547,99 +1548,110 @@ ProcSleep(LOCALLOCK *locallock) /* * If awoken after the deadlock check interrupt has run, and - * log_lock_waits is on, then report about the wait. + * log_lock_waits or track_lock_timing is on, then report or track + * about the wait. */ - if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED) + if ((log_lock_waits || track_lock_timing) && + deadlock_state != DS_NOT_YET_CHECKED) { - StringInfoData buf, - lock_waiters_sbuf, - lock_holders_sbuf; - const char *modename; long secs; int usecs; long msecs; - int lockHoldersNum = 0; - initStringInfo(&buf); - initStringInfo(&lock_waiters_sbuf); - initStringInfo(&lock_holders_sbuf); - - DescribeLockTag(&buf, &locallock->tag.lock); - modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid, - lockmode); TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT), GetCurrentTimestamp(), &secs, &usecs); msecs = secs * 1000 + usecs / 1000; usecs = usecs % 1000; - /* Gather a list of all lock holders and waiters */ - LWLockAcquire(partitionLock, LW_SHARED); - GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf, - &lock_waiters_sbuf, &lockHoldersNum); - LWLockRelease(partitionLock); - - if (deadlock_state == DS_SOFT_DEADLOCK) - ereport(LOG, - (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms", - MyProcPid, modename, buf.data, msecs, usecs), - (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", - "Processes holding the lock: %s. 
Wait queue: %s.", - lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); - else if (deadlock_state == DS_HARD_DEADLOCK) - { - /* - * This message is a bit redundant with the error that will be - * reported subsequently, but in some cases the error report - * might not make it to the log (eg, if it's caught by an - * exception handler), and we want to ensure all long-wait - * events get logged. - */ - ereport(LOG, - (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms", - MyProcPid, modename, buf.data, msecs, usecs), - (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", - "Processes holding the lock: %s. Wait queue: %s.", - lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); - } - - if (myWaitStatus == PROC_WAIT_STATUS_WAITING) - ereport(LOG, - (errmsg("process %d still waiting for %s on %s after %ld.%03d ms", - MyProcPid, modename, buf.data, msecs, usecs), - (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", - "Processes holding the lock: %s. Wait queue: %s.", - lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); - else if (myWaitStatus == PROC_WAIT_STATUS_OK) - { - /* Increment the lock statistics counters */ + /* Increment the lock statistics counters */ + if (track_lock_timing && myWaitStatus == PROC_WAIT_STATUS_OK) pgstat_count_lock_waits(locallock->tag.lock.locktag_type, msecs); - ereport(LOG, - (errmsg("process %d acquired %s on %s after %ld.%03d ms", - MyProcPid, modename, buf.data, msecs, usecs))); - } - else + if (log_lock_waits) { - Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR); - - /* - * Currently, the deadlock checker always kicks its own - * process, which means that we'll only see - * PROC_WAIT_STATUS_ERROR when deadlock_state == - * DS_HARD_DEADLOCK, and there's no need to print redundant - * messages. But for completeness and future-proofing, print - * a message if it looks like someone else kicked us off the - * lock. 
- */ - if (deadlock_state != DS_HARD_DEADLOCK) + StringInfoData buf, + lock_waiters_sbuf, + lock_holders_sbuf; + const char *modename; + int lockHoldersNum = 0; + + initStringInfo(&buf); + initStringInfo(&lock_waiters_sbuf); + initStringInfo(&lock_holders_sbuf); + + DescribeLockTag(&buf, &locallock->tag.lock); + modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid, + lockmode); + + /* Gather a list of all lock holders and waiters */ + LWLockAcquire(partitionLock, LW_SHARED); + GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf, + &lock_waiters_sbuf, &lockHoldersNum); + LWLockRelease(partitionLock); + + if (deadlock_state == DS_SOFT_DEADLOCK) + ereport(LOG, + (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms", + MyProcPid, modename, buf.data, msecs, usecs), + (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", + "Processes holding the lock: %s. Wait queue: %s.", + lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); + else if (deadlock_state == DS_HARD_DEADLOCK) + { + /* + * This message is a bit redundant with the error that + * will be reported subsequently, but in some cases the + * error report might not make it to the log (eg, if it's + * caught by an exception handler), and we want to ensure + * all long-wait events get logged. + */ ereport(LOG, - (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms", + (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms", MyProcPid, modename, buf.data, msecs, usecs), (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", "Processes holding the lock: %s. 
Wait queue: %s.", lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); + } + + if (myWaitStatus == PROC_WAIT_STATUS_WAITING) + ereport(LOG, + (errmsg("process %d still waiting for %s on %s after %ld.%03d ms", + MyProcPid, modename, buf.data, msecs, usecs), + (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", + "Processes holding the lock: %s. Wait queue: %s.", + lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); + + else if (myWaitStatus == PROC_WAIT_STATUS_OK) + ereport(LOG, + (errmsg("process %d acquired %s on %s after %ld.%03d ms", + MyProcPid, modename, buf.data, msecs, usecs))); + else + { + Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR); + + /* + * Currently, the deadlock checker always kicks its own + * process, which means that we'll only see + * PROC_WAIT_STATUS_ERROR when deadlock_state == + * DS_HARD_DEADLOCK, and there's no need to print + * redundant messages. But for completeness and + * future-proofing, print a message if it looks like + * someone else kicked us off the lock. + */ + if (deadlock_state != DS_HARD_DEADLOCK) + ereport(LOG, + (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms", + MyProcPid, modename, buf.data, msecs, usecs), + (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.", + "Processes holding the lock: %s. Wait queue: %s.", + lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data)))); + } + + pfree(buf.data); + pfree(lock_holders_sbuf.data); + pfree(lock_waiters_sbuf.data); } /* @@ -1647,10 +1659,6 @@ ProcSleep(LOCALLOCK *locallock) * state so we don't print the above messages again. 
*/ deadlock_state = DS_NO_DEADLOCK; - - pfree(buf.data); - pfree(lock_holders_sbuf.data); - pfree(lock_waiters_sbuf.data); } } while (myWaitStatus == PROC_WAIT_STATUS_WAITING); diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat index 9507778415d..c21e6c6ea4e 100644 --- a/src/backend/utils/misc/guc_parameters.dat +++ b/src/backend/utils/misc/guc_parameters.dat @@ -3110,6 +3110,12 @@ boot_val => 'false', }, +{ name => 'track_lock_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', + short_desc => 'Collects timing statistics for lock acquisition.', + variable => 'track_lock_timing', + boot_val => 'true', +}, + { name => 'track_wal_io_timing', type => 'bool', context => 'PGC_SUSET', group => 'STATS_CUMULATIVE', short_desc => 'Collects timing statistics for WAL I/O activity.', variable => 'track_wal_io_timing', diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index f938cc65a3a..8a3a704aaa5 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -685,6 +685,7 @@ #track_counts = on #track_cost_delay_timing = off #track_io_timing = off +#track_lock_timing = on #track_wal_io_timing = off #track_functions = none # none, pl, all #stats_fetch_consistency = cache # cache, none, snapshot diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 1c261e2c7a8..1ba7d98d06b 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -843,6 +843,7 @@ extern PgStat_WalStats *pgstat_fetch_stat_wal(void); extern PGDLLIMPORT bool pgstat_track_counts; extern PGDLLIMPORT int pgstat_track_functions; extern PGDLLIMPORT int pgstat_fetch_consistency; +extern PGDLLIMPORT bool track_lock_timing; /* diff --git a/src/test/isolation/expected/stats.out b/src/test/isolation/expected/stats.out index 3cae3052e40..4f7ad061549 100644 --- a/src/test/isolation/expected/stats.out +++ 
b/src/test/isolation/expected/stats.out @@ -3752,7 +3752,7 @@ test_stat_func| 1|t |t step s1_commit: COMMIT; -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation pg_stat_force_next_flush ------------------------ @@ -3765,9 +3765,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_begin: BEGIN; step s1_lock_relation: LOCK TABLE test_stat_tab; step s2_begin: BEGIN; @@ -3794,7 +3794,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid pg_stat_force_next_flush ------------------------ @@ -3807,9 +3807,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET 
log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_table_insert: INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1); step s1_begin: BEGIN; step s1_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; @@ -3837,7 +3837,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory pg_stat_force_next_flush ------------------------ @@ -3850,9 +3850,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_lock_advisory_lock: SELECT pg_advisory_lock(1); pg_advisory_lock ---------------- @@ -3899,7 +3899,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_unset_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_unset_track_lock_timing s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation pg_stat_force_next_flush ------------------------ @@ -3912,9 
+3912,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_unset_log_lock_waits: SET log_lock_waits = off; +step s2_unset_track_lock_timing: SET track_lock_timing = off; step s1_begin: BEGIN; step s1_lock_relation: LOCK TABLE test_stat_tab; step s2_begin: BEGIN; diff --git a/src/test/isolation/expected/stats_1.out b/src/test/isolation/expected/stats_1.out index ea4fd97a9a5..e1a60d41bad 100644 --- a/src/test/isolation/expected/stats_1.out +++ b/src/test/isolation/expected/stats_1.out @@ -3776,7 +3776,7 @@ test_stat_func| 1|t |t step s1_commit: COMMIT; -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation pg_stat_force_next_flush ------------------------ @@ -3789,9 +3789,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_begin: BEGIN; step s1_lock_relation: LOCK TABLE test_stat_tab; step s2_begin: BEGIN; @@ -3818,7 +3818,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit 
s2_report_stat_lock_transactionid +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid pg_stat_force_next_flush ------------------------ @@ -3831,9 +3831,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_table_insert: INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1); step s1_begin: BEGIN; step s1_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; @@ -3861,7 +3861,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_set_log_lock_waits s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_set_track_lock_timing s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory pg_stat_force_next_flush ------------------------ @@ -3874,9 +3874,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_set_log_lock_waits: SET log_lock_waits = on; +step s2_set_track_lock_timing: SET track_lock_timing = on; step s1_lock_advisory_lock: SELECT pg_advisory_lock(1); pg_advisory_lock 
---------------- @@ -3923,7 +3923,7 @@ t |t (1 row) -starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_log_lock_waits s2_set_deadlock_timeout s2_unset_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation +starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s1_set_track_lock_timing s2_set_deadlock_timeout s2_unset_track_lock_timing s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation pg_stat_force_next_flush ------------------------ @@ -3936,9 +3936,9 @@ pg_stat_reset_shared (1 row) -step s1_set_log_lock_waits: SET log_lock_waits = on; +step s1_set_track_lock_timing: SET track_lock_timing = on; step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms'; -step s2_unset_log_lock_waits: SET log_lock_waits = off; +step s2_unset_track_lock_timing: SET track_lock_timing = off; step s1_begin: BEGIN; step s1_lock_relation: LOCK TABLE test_stat_tab; step s2_begin: BEGIN; diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec index 42be68c545f..81b45a801d9 100644 --- a/src/test/isolation/specs/stats.spec +++ b/src/test/isolation/specs/stats.spec @@ -132,7 +132,7 @@ step s1_slru_check_stats { # Lock stats steps step s1_set_deadlock_timeout { SET deadlock_timeout = '10ms'; } -step s1_set_log_lock_waits { SET log_lock_waits = on; } +step s1_set_track_lock_timing { SET track_lock_timing = on; } step s1_reset_stat_lock { SELECT pg_stat_reset_shared('lock'); } step s1_sleep { SELECT pg_sleep(0.5); } step s1_lock_relation { LOCK TABLE test_stat_tab; } @@ -174,8 +174,8 @@ step s2_big_notify { SELECT pg_notify('stats_test_use', # Lock stats steps step s2_set_deadlock_timeout { SET deadlock_timeout = '10ms'; } -step s2_set_log_lock_waits { SET log_lock_waits = on; } -step s2_unset_log_lock_waits { SET log_lock_waits = off; } +step s2_set_track_lock_timing { SET track_lock_timing = 
on; } +step s2_unset_track_lock_timing { SET track_lock_timing = off; } step s2_report_stat_lock_relation { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'relation'; } step s2_report_stat_lock_transactionid { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'transactionid'; } step s2_report_stat_lock_advisory { SELECT waits > 0, wait_time > 500 FROM pg_stat_lock WHERE locktype = 'advisory'; } @@ -793,9 +793,9 @@ permutation permutation s1_set_deadlock_timeout s1_reset_stat_lock - s1_set_log_lock_waits + s1_set_track_lock_timing s2_set_deadlock_timeout - s2_set_log_lock_waits + s2_set_track_lock_timing s1_begin s1_lock_relation s2_begin @@ -811,9 +811,9 @@ permutation permutation s1_set_deadlock_timeout s1_reset_stat_lock - s1_set_log_lock_waits + s1_set_track_lock_timing s2_set_deadlock_timeout - s2_set_log_lock_waits + s2_set_track_lock_timing s1_table_insert s1_begin s1_table_update_k1 @@ -830,9 +830,9 @@ permutation permutation s1_set_deadlock_timeout s1_reset_stat_lock - s1_set_log_lock_waits + s1_set_track_lock_timing s2_set_deadlock_timeout - s2_set_log_lock_waits + s2_set_track_lock_timing s1_lock_advisory_lock s2_begin s2_ff @@ -843,14 +843,14 @@ permutation s2_commit s2_report_stat_lock_advisory -# Ensure log_lock_waits behaves correctly +# Ensure track_lock_timing behaves correctly permutation s1_set_deadlock_timeout s1_reset_stat_lock - s1_set_log_lock_waits + s1_set_track_lock_timing s2_set_deadlock_timeout - s2_unset_log_lock_waits + s2_unset_track_lock_timing s1_begin s1_lock_relation s2_begin -- 2.34.1
