On 3/4/25 14:11, Andres Freund wrote: > Hi, > > On 2025-03-04 14:05:22 +0100, Tomas Vondra wrote: >> On 3/3/25 21:52, Andres Freund wrote: >>>> It's not a proper constant, of course, but it seemed close >>>> enough. Yes, it might confuse people into thinking it's a constant, or >>>> is there some additional impact? >>> >>> That seems plenty. I just looked at the shmem sizing function and was >>> confused >>> because I didn't see where the max_locks_per_transaction affects the >>> allocation size. >>> >> >> But the shmem sizing doesn't use FP_LOCK_SLOTS_PER_BACKEND at all, both >> proc.c and postinit.c use the "full" formula, not the macro > > Not sure what I brainfarted there... >
This got me thinking - maybe it'd be better to use the new FastPathLockSlotsPerBackend() in all places that need the number of slots per backend, including those in proc.c etc.? Arguably, these places should have used FP_LOCK_SLOTS_PER_BACKEND before. The attached v2 patch does that. >>>> The one fix I can think of is making it look more like a function, >>>> possibly just like this: >>>> >>>> #define FastPathLockSlotsPerBackend() \ >>>> (FP_LOCK_SLOTS_PER_GROUP * FastPathLockGroupsPerBackend) >>>> >>>> Or do you have another suggestion? >>> >>> That'd work for me. >>> >> >> Attached is a patch doing this, but considering it has nothing to do >> with the shmem sizing, I wonder if it's worth it. > > Yes. > OK, barring objections I'll push the v2. regards -- Tomas Vondra
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 11b4d1085bb..ccfe6b69bf5 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -226,10 +226,10 @@ int FastPathLockGroupsPerBackend = 0; * the FAST_PATH_SLOT macro, split it into group and index (in the group). */ #define FAST_PATH_GROUP(index) \ - (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \ + (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \ ((index) / FP_LOCK_SLOTS_PER_GROUP)) #define FAST_PATH_INDEX(index) \ - (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \ + (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \ ((index) % FP_LOCK_SLOTS_PER_GROUP)) /* Macros for manipulating proc->fpLockBits */ @@ -242,7 +242,7 @@ int FastPathLockGroupsPerBackend = 0; #define FAST_PATH_BIT_POSITION(n, l) \ (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \ AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \ - AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \ + AssertMacro((n) < FastPathLockSlotsPerBackend()), \ ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n)))) #define FAST_PATH_SET_LOCKMODE(proc, n, l) \ FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l) @@ -2691,7 +2691,7 @@ static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode) { uint32 i; - uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND; + uint32 unused_slot = FastPathLockSlotsPerBackend(); /* fast-path group the lock belongs to */ uint32 group = FAST_PATH_REL_GROUP(relid); @@ -2713,7 +2713,7 @@ FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode) } /* If no existing entry, use any empty slot. 
*/ - if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND) + if (unused_slot < FastPathLockSlotsPerBackend()) { MyProc->fpRelId[unused_slot] = relid; FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode); diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 49204f91a20..749a79d48ef 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -116,7 +116,7 @@ ProcGlobalShmemSize(void) * nicely aligned in each backend. */ fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64)); - fpRelIdSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(Oid) * FP_LOCK_SLOTS_PER_GROUP); + fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid)); size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize))); @@ -231,7 +231,7 @@ InitProcGlobal(void) * shared memory and then divide that between backends. */ fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64)); - fpRelIdSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(Oid) * FP_LOCK_SLOTS_PER_GROUP); + fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid)); fpPtr = ShmemAlloc(TotalProcs * (fpLockBitsSize + fpRelIdSize)); MemSet(fpPtr, 0, TotalProcs * (fpLockBitsSize + fpRelIdSize)); diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 318600d6d02..763893eed8f 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -586,7 +586,7 @@ InitializeFastPathLocks(void) while (FastPathLockGroupsPerBackend < FP_LOCK_GROUPS_PER_BACKEND_MAX) { /* stop once we exceed max_locks_per_xact */ - if (FastPathLockGroupsPerBackend * FP_LOCK_SLOTS_PER_GROUP >= max_locks_per_xact) + if (FastPathLockSlotsPerBackend() >= max_locks_per_xact) break; FastPathLockGroupsPerBackend *= 2; diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 20777f7d5ae..114eb1f8f76 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -88,7 +88,8 
@@ extern PGDLLIMPORT int FastPathLockGroupsPerBackend; #define FP_LOCK_GROUPS_PER_BACKEND_MAX 1024 #define FP_LOCK_SLOTS_PER_GROUP 16 /* don't change */ -#define FP_LOCK_SLOTS_PER_BACKEND (FP_LOCK_SLOTS_PER_GROUP * FastPathLockGroupsPerBackend) +#define FastPathLockSlotsPerBackend() \ + (FP_LOCK_SLOTS_PER_GROUP * FastPathLockGroupsPerBackend) /* * Flags for PGPROC.delayChkptFlags