On Sun, 14 Sep 2025 at 21:21 +0100, Mike Crowe wrote:
Passing a timeout from before the epoch to
std::shared_timed_mutex::try_lock_until or
std::shared_timed_mutex::try_lock_shared_until causes the POSIX rwlock
functions to be passed an invalid negative timeout, which results in them
returning EINVAL.
thread.timedmutex.requirements.general in the C++ standard says:
If abs_time has already passed, the function attempts to obtain
ownership without blocking (as if by calling try_lock()).
and a time before the epoch has clearly already passed (see the description
of libstdc++/PR116586). Let's treat such negative times as being at the
epoch so that these methods work correctly. Add test cases to prove that
this and similar potential problems are no longer present.
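
For reference, the conversion that feeds the POSIX calls looks roughly like
this (a simplified, untested sketch based on the context lines in the patch
below; to_timespec is just an illustrative wrapper, not a real helper):

  #include <chrono>
  #include <ctime>

  // Rough sketch: how an absolute timeout becomes the timespec handed to
  // the POSIX rwlock functions.
  timespec to_timespec(std::chrono::system_clock::time_point abs_time)
  {
    namespace chrono = std::chrono;
    auto s  = chrono::time_point_cast<chrono::seconds>(abs_time);
    auto ns = chrono::duration_cast<chrono::nanoseconds>(abs_time - s);
    return { static_cast<std::time_t>(s.time_since_epoch().count()),
             static_cast<long>(ns.count()) };
  }
  // abs_time 10ms before the epoch => { tv_sec == 0,   tv_nsec == -10000000 }
  // abs_time 10s  before the epoch => { tv_sec == -10, tv_nsec == 0 }
  // POSIX rejects both with EINVAL, hence the clamp in the patch below.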
PR libstdc++/116586
libstdc++-v3/ChangeLog:
* include/std/shared_mutex (shared_timed_mutex::try_lock_until,
shared_timed_mutex::try_lock_shared_until): Ensure that
negative timeout is not passed to POSIX rwlock functions.
* testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc:
New test.
In r16-4427-gfb558b78108ff2 I pushed just the new test, without the
code changes. When fixing https://gcc.gnu.org/PR122401 I found that
the new test hangs on older Glibc. I observed it on 2.28, but presumably
other versions without your clocklock extensions are affected too.
Suggested fix below ...
Signed-off-by: Mike Crowe <[email protected]>
---
libstdc++-v3/include/std/shared_mutex | 20 ++++
.../try_lock_until/116586.cc | 94 +++++++++++++++++++
2 files changed, 114 insertions(+)
create mode 100644 libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc
diff --git a/libstdc++-v3/include/std/shared_mutex b/libstdc++-v3/include/std/shared_mutex
index 94c8532399d..ef0f1df5a2b 100644
--- a/libstdc++-v3/include/std/shared_mutex
+++ b/libstdc++-v3/include/std/shared_mutex
@@ -529,6 +529,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_cast<long>(__ns.count())
};
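+ // A timeout before the epoch produces a negative tv_sec and/or tv_nsec,
+ // which the POSIX rwlock functions reject with EINVAL.  Clamp to the
+ // epoch so this becomes a single non-blocking attempt, as the standard
+ // requires for an abs_time that has already passed.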
+ if (__ts.tv_sec < 0 || __ts.tv_nsec < 0) {
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 0;
+ }
+
int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
// On self-deadlock, we just fail to acquire the lock. Technically,
// the program violated the precondition.
@@ -555,6 +560,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_cast<long>(__ns.count())
};
+ if (__ts.tv_sec < 0 || __ts.tv_nsec < 0) {
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 0;
+ }
+
int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
&__ts);
// On self-deadlock, we just fail to acquire the lock. Technically,
@@ -605,6 +615,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_cast<long>(__ns.count())
};
+ if (__ts.tv_sec < 0 || __ts.tv_nsec < 0) {
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 0;
+ }
+
int __ret;
// Unlike for lock(), we are not allowed to throw an exception so if
// the maximum number of read locks has been exceeded, or we would
@@ -645,6 +660,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
static_cast<long>(__ns.count())
};
+ if (__ts.tv_sec < 0 || __ts.tv_nsec < 0) {
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 0;
+ }
+
int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
&__ts);
// On self-deadlock, we just fail to acquire the lock. Technically,
diff --git a/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc b/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc
new file mode 100644
index 00000000000..4ed650a78cf
--- /dev/null
+++ b/libstdc++-v3/testsuite/30_threads/shared_timed_mutex/try_lock_until/116586.cc
@@ -0,0 +1,94 @@
+// { dg-do run { target c++14 } }
+
+#include <chrono>
+#include <future>
+#include <shared_mutex>
+#include <testsuite_hooks.h>
+
+namespace chrono = std::chrono;
+
+// thread.timedmutex.requirements.general:
+// If abs_time has already passed, the function attempts to obtain
+// ownership without blocking (as if by calling try_lock()).
+
+template <typename Clock>
+void
+test_exclusive_absolute(chrono::nanoseconds offset)
+{
+ std::shared_timed_mutex stm;
+ chrono::time_point<Clock> tp(offset);
+ VERIFY(stm.try_lock_until(tp));
+ VERIFY(!stm.try_lock_until(tp));
+}
+
+template <typename Clock>
+void
+test_shared_absolute(chrono::nanoseconds offset)
+{
+ std::shared_timed_mutex stm;
+ chrono::time_point<Clock> tp(offset);
+ VERIFY(stm.try_lock_shared_until(tp));
+ stm.unlock_shared();
+
+ VERIFY(stm.try_lock_for(chrono::seconds{10}));
+
+ {
+ // NPTL will give us EDEADLK if pthread_rwlock_timedrdlock() is called on
+ // the same thread that already holds the exclusive (write) lock, so let's
+ // arrange for a different thread to try to acquire the shared lock.
+ auto t = std::async(std::launch::async, [&stm, tp]() {
+ VERIFY(!stm.try_lock_shared_until(tp));
+ });
+ }
+}
+
+// The type of clock used for the actual wait depends on whether
+// _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK is defined. We might as well just test
+// both steady_clock and system_clock.
+template <typename Clock>
+void
+test_exclusive_relative(chrono::nanoseconds offset)
+{
+ std::shared_timed_mutex stm;
+ const auto d = -Clock::now().time_since_epoch() + offset;
+ VERIFY(stm.try_lock_for(d));
+ VERIFY(!stm.try_lock_for(d));
+}
+
+template <typename Clock>
+void
+test_shared_relative(chrono::nanoseconds offset)
+{
+ std::shared_timed_mutex stm;
+ const auto d = -Clock::now().time_since_epoch() + offset;
+ VERIFY(stm.try_lock_shared_for(d));
+ stm.unlock_shared();
+ // Should complete immediately
+ VERIFY(stm.try_lock_for(chrono::seconds{10}));
+ VERIFY(!stm.try_lock_shared_for(d));
I think this function needs the same use of std::async as above,
because when _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK is not defined we
still use pthread_rwlock_timedrdlock and so we still get EDEADLK (and
then loop forever) here. You won't see a problem with modern Glibc
versions because I think pthread_rwlock_clockrdlock doesn't return
EDEADLK here.
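
Something along these lines (untested) in place of the direct call above,
mirroring test_shared_absolute:

  {
    // Let a separate thread make the shared-lock attempt so that NPTL's
    // pthread_rwlock_timedrdlock cannot report EDEADLK against the thread
    // that already holds the write lock.
    auto t = std::async(std::launch::async, [&stm, d]() {
      VERIFY(!stm.try_lock_shared_for(d));
    });
  }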
+}
+
+int main()
+{
+ // Try once with an offset that ought to result in tv_sec == 0, tv_nsec < 0
+ // and one with an offset that ought to result in tv_sec < 0, tv_nsec == 0
+ // for the absolute calls at least. It's not really possible to arrange for
+ // the relative calls to have tv_nsec == 0 due to time advancing.
+ for (const chrono::nanoseconds offset : {
+ // tv_sec == 0, tv_nsec == 0
+ chrono::nanoseconds{0},
+ // tv_sec == 0, tv_nsec < 0
+ chrono::duration_cast<chrono::nanoseconds>(chrono::milliseconds{-10}),
+ // tv_sec < 0
+ chrono::duration_cast<chrono::nanoseconds>(chrono::seconds{-10})
+ }) {
+ test_exclusive_absolute<chrono::system_clock>(offset);
+ test_shared_absolute<chrono::system_clock>(offset);
+ test_exclusive_relative<chrono::system_clock>(offset);
+ test_shared_relative<chrono::system_clock>(offset);
+
+ test_exclusive_absolute<chrono::steady_clock>(offset);
+ test_shared_absolute<chrono::steady_clock>(offset);
+ test_exclusive_relative<chrono::steady_clock>(offset);
+ test_shared_relative<chrono::steady_clock>(offset);
+ }
+}
--
2.39.5