I will design the new GUC to be consistent with 'log_lock_waits'.

Regarding the patch, when I applied it to HEAD, it failed to compile with
the following errors. Could you update the patch to address this?

proc.c:1538:20: error: use of undeclared identifier 'buf'
 1538 |                         initStringInfo(&buf);
      |                                         ^
proc.c:1539:20: error: use of undeclared identifier 'lock_waiters_sbuf'
 1539 |                         initStringInfo(&lock_waiters_sbuf);
      |                                         ^
proc.c:1540:20: error: use of undeclared identifier 'lock_holders_sbuf'
 1540 |                         initStringInfo(&lock_holders_sbuf);
      |                                         ^
....

This patch conflicted with another commit [1]. I'll rebase it later.


[1] https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=3c0fd64fec8ed6fa3987c33f076fcffbc3f268c3

Rebased, added the new GUC log_lock_failure, and made some minor fixes.
Currently, only lock failures due to NOWAIT are supported. I would like
to revisit extending this GUC to cover client cancellations and lock
timeouts at another opportunity.
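
To illustrate, here is an example with the patch applied (the table name,
PIDs, and OIDs below are made up):

    -- session 1
    BEGIN;
    LOCK TABLE test_tbl IN ACCESS EXCLUSIVE MODE;

    -- session 2
    SET log_lock_failure = on;
    LOCK TABLE test_tbl IN ACCESS SHARE MODE NOWAIT;
    ERROR:  could not obtain lock on relation "test_tbl"

The server log then contains something like:

    LOG:  process 2000 could not obtain AccessShareLock on relation 16384 of database 5
    DETAIL:  Process holding the lock: 1000. Wait: 2000.

Note that the DETAIL line goes only to the server log, not to the client,
since the patch uses errdetail_log_plural().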

--
Regards,
Yuki Seino
NTT DATA GROUP CORPORATION
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index fbdd6ce574..559b07e7ff 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -7730,6 +7730,24 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
       </listitem>
      </varlistentry>
 
+     <varlistentry id="guc-log-lock-failure" xreflabel="log_lock_failure">
+      <term><varname>log_lock_failure</varname> (<type>boolean</type>)
+      <indexterm>
+       <primary><varname>log_lock_failure</varname> configuration parameter</primary>
+      </indexterm>
+      </term>
+      <listitem>
+       <para>
+        Controls whether a log message is produced when a lock acquisition
+        fails.  This is useful for analyzing the causes of lock failures.
+        Currently, only lock failures due to <literal>NOWAIT</literal> are
+        supported.  The default is <literal>off</literal>.  Only superusers
+        and users with the appropriate <literal>SET</literal> privilege can
+        change this setting.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry id="guc-log-recovery-conflict-waits" xreflabel="log_recovery_conflict_waits">
       <term><varname>log_recovery_conflict_waits</varname> (<type>boolean</type>)
       <indexterm>
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 720ef99ee8..0d8e680ff2 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -61,6 +61,7 @@ int                   IdleInTransactionSessionTimeout = 0;
 int                    TransactionTimeout = 0;
 int                    IdleSessionTimeout = 0;
 bool           log_lock_waits = false;
+bool           log_lock_failure = false;
 
 /* Pointer to this process's PGPROC struct, if any */
 PGPROC    *MyProc = NULL;
@@ -1210,10 +1211,38 @@ JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
 
        /*
         * At this point we know that we'd really need to sleep. If we've been
-        * commanded not to do that, bail out.
+        * commanded not to do that, bail out. Output lock information only
+        * if log_lock_failure is on.
         */
        if (dontWait)
+       {
+               if (log_lock_failure)
+               {
+                       StringInfoData buf,
+                                               lock_waiters_sbuf,
+                                               lock_holders_sbuf;
+                       const char *modename;
+                       int                     lockHoldersNum = 0;
+                       initStringInfo(&buf);
+                       initStringInfo(&lock_waiters_sbuf);
+                       initStringInfo(&lock_holders_sbuf);
+
+                       DescribeLockTag(&buf, &locallock->tag.lock);
+                       modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
+                                                                          lockmode);
+
+                       /* Collect lock holders and waiters */
+                       CollectLockHoldersAndWaiters(proclock, lock, &lock_holders_sbuf, &lock_waiters_sbuf, &lockHoldersNum);
+
+                       ereport(LOG,
+                                       (errmsg("process %d could not obtain %s on %s",
+                                                       MyProcPid, modename, buf.data),
+                                       (errdetail_log_plural("Process holding the lock: %s. Wait: %s.",
+                                                                                 "Processes holding the lock: %s. Wait: %s.",
+                                                                                 lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
+               }
                return PROC_WAIT_STATUS_ERROR;
+       }
 
        /*
         * Insert self into queue, at the position determined above.
@@ -1509,10 +1538,6 @@ ProcSleep(LOCALLOCK *locallock)
                        long            secs;
                        int                     usecs;
                        long            msecs;
-                       dlist_iter      proc_iter;
-                       PROCLOCK   *curproclock;
-                       bool            first_holder = true,
-                                               first_waiter = true;
                        int                     lockHoldersNum = 0;
 
                        initStringInfo(&buf);
@@ -1528,53 +1553,10 @@ ProcSleep(LOCALLOCK *locallock)
                        msecs = secs * 1000 + usecs / 1000;
                        usecs = usecs % 1000;
 
-                       /*
-                        * we loop over the lock's procLocks to gather a list of all
-                        * holders and waiters. Thus we will be able to provide more
-                        * detailed information for lock debugging purposes.
-                        *
-                        * lock->procLocks contains all processes which hold or wait for
-                        * this lock.
-                        */
-
                        LWLockAcquire(partitionLock, LW_SHARED);

-                       dlist_foreach(proc_iter, &lock->procLocks)
-                       {
-                               curproclock =
-                                       dlist_container(PROCLOCK, lockLink, proc_iter.cur);
-
-                               /*
-                                * we are a waiter if myProc->waitProcLock == curproclock; we
-                                * are a holder if it is NULL or something different
-                                */
-                               if (curproclock->tag.myProc->waitProcLock == curproclock)
-                               {
-                                       if (first_waiter)
-                                       {
-                                               appendStringInfo(&lock_waiters_sbuf, "%d",
-                                                                                curproclock->tag.myProc->pid);
-                                               first_waiter = false;
-                                       }
-                                       else
-                                               appendStringInfo(&lock_waiters_sbuf, ", %d",
-                                                                                curproclock->tag.myProc->pid);
-                               }
-                               else
-                               {
-                                       if (first_holder)
-                                       {
-                                               appendStringInfo(&lock_holders_sbuf, "%d",
-                                                                                curproclock->tag.myProc->pid);
-                                               first_holder = false;
-                                       }
-                                       else
-                                               appendStringInfo(&lock_holders_sbuf, ", %d",
-                                                                                curproclock->tag.myProc->pid);
-
-                                       lockHoldersNum++;
-                               }
-                       }
+                       /* Collect lock holders and waiters */
+                       CollectLockHoldersAndWaiters(NULL, lock, &lock_holders_sbuf, &lock_waiters_sbuf, &lockHoldersNum);
 
                        LWLockRelease(partitionLock);
 
@@ -1980,3 +1962,58 @@ BecomeLockGroupMember(PGPROC *leader, int pid)
 
        return ok;
 }
+
+/*
+ * CollectLockHoldersAndWaiters -- gather lists of all holder and waiter
+ * PIDs of the given lock, for more detailed lock debugging information.
+ * lock->procLocks contains all processes which hold or wait for this lock.
+ *
+ * If the caller is not yet in the wait queue, pass its own PROCLOCK as
+ * waitProcLock so that it is counted as a waiter; otherwise pass NULL.
+ */
+void
+CollectLockHoldersAndWaiters(PROCLOCK *waitProcLock, LOCK *lock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
+{
+       bool first_holder = true;
+       bool first_waiter = true;
+       dlist_iter proc_iter;
+       PROCLOCK *curproclock;
+
+       dlist_foreach(proc_iter, &lock->procLocks)
+       {
+               curproclock =
+                       dlist_container(PROCLOCK, lockLink, proc_iter.cur);
+
+               /*
+                * we are a waiter if myProc->waitProcLock == curproclock or if
+                * curproclock was passed in as waitProcLock; otherwise a holder
+                */
+               if (curproclock->tag.myProc->waitProcLock == curproclock ||
+                       (waitProcLock != NULL && waitProcLock == curproclock))
+               {
+                       if (first_waiter)
+                       {
+                               appendStringInfo(lock_waiters_sbuf, "%d",
+                                                                       curproclock->tag.myProc->pid);
+                               first_waiter = false;
+                       }
+                       else
+                               appendStringInfo(lock_waiters_sbuf, ", %d",
+                                                                       curproclock->tag.myProc->pid);
+               }
+               else
+               {
+                       if (first_holder)
+                       {
+                               appendStringInfo(lock_holders_sbuf, "%d",
+                                                                       curproclock->tag.myProc->pid);
+                               first_holder = false;
+                       }
+                       else
+                               appendStringInfo(lock_holders_sbuf, ", %d",
+                                                                       curproclock->tag.myProc->pid);
+
+                       (*lockHoldersNum)++;
+               }
+       }
+}
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 8cf1afbad2..43b3bbdcfc 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -1571,6 +1571,15 @@ struct config_bool ConfigureNamesBool[] =
                false,
                NULL, NULL, NULL
        },
+       {
+               {"log_lock_failure", PGC_SUSET, LOGGING_WHAT,
+                       gettext_noop("Logs lock failures."),
+                       NULL
+               },
+               &log_lock_failure,
+               false,
+               NULL, NULL, NULL
+       },
        {
                {"log_recovery_conflict_waits", PGC_SIGHUP, LOGGING_WHAT,
                        gettext_noop("Logs standby recovery conflict waits."),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index a2ac7575ca..4fa64cc1aa 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -609,6 +609,7 @@
                                        #   %% = '%'
                                        # e.g. '<%u%%%d> '
 #log_lock_waits = off                  # log lock waits >= deadlock_timeout
+#log_lock_failure = off                        # log lock failures
 #log_recovery_conflict_waits = off     # log standby recovery conflict waits
                                        # >= deadlock_timeout
 #log_parameter_max_length = -1         # when logging statements, limit logged
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 5a3dd5d2d4..28b81c266d 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -461,6 +461,7 @@ extern PGDLLIMPORT int IdleInTransactionSessionTimeout;
 extern PGDLLIMPORT int TransactionTimeout;
 extern PGDLLIMPORT int IdleSessionTimeout;
 extern PGDLLIMPORT bool log_lock_waits;
+extern PGDLLIMPORT bool log_lock_failure;
 
 #ifdef EXEC_BACKEND
 extern PGDLLIMPORT slock_t *ProcStructLock;
@@ -499,5 +500,6 @@ extern PGPROC *AuxiliaryPidGetProc(int pid);
 
 extern void BecomeLockGroupLeader(void);
 extern bool BecomeLockGroupMember(PGPROC *leader, int pid);
+extern void CollectLockHoldersAndWaiters(PROCLOCK *waitProcLock, LOCK *lock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum);
 
 #endif                                                 /* _PROC_H_ */
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 0e40ed32a2..efe570403f 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -2400,6 +2400,7 @@ regression_main(int argc, char *argv[],
                fputs("log_checkpoints = on\n", pg_conf);
                fputs("log_line_prefix = '%m %b[%p] %q%a '\n", pg_conf);
                fputs("log_lock_waits = on\n", pg_conf);
+               fputs("log_lock_failure = on\n", pg_conf);
                fputs("log_temp_files = 128kB\n", pg_conf);
                fputs("max_prepared_transactions = 2\n", pg_conf);
 
