Module Name: src
Committed By: martin
Date: Tue Aug 1 17:19:57 UTC 2023
Modified Files:
src/sys/external/bsd/drm2/linux [netbsd-8]: linux_ww_mutex.c
Log Message:
Pull up following revision(s) (requested by riastradh in ticket #1875):
sys/external/bsd/drm2/linux/linux_ww_mutex.c: revision 1.10
drm: Spruce up ww_mutex comments. Audit return values.
To generate a diff of this commit:
cvs rdiff -u -r1.2.10.4 -r1.2.10.5 \
src/sys/external/bsd/drm2/linux/linux_ww_mutex.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/external/bsd/drm2/linux/linux_ww_mutex.c
diff -u src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.2.10.4 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.2.10.5
--- src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.2.10.4 Tue Aug 1 16:58:07 2023
+++ src/sys/external/bsd/drm2/linux/linux_ww_mutex.c Tue Aug 1 17:19:57 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_ww_mutex.c,v 1.2.10.4 2023/08/01 16:58:07 martin Exp $ */
+/* $NetBSD: linux_ww_mutex.c,v 1.2.10.5 2023/08/01 17:19:57 martin Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.2.10.4 2023/08/01 16:58:07 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.2.10.5 2023/08/01 17:19:57 martin Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -168,6 +168,13 @@ static lockops_t ww_lockops = {
};
#endif
+/*
+ * ww_mutex_init(mutex, class)
+ *
+ * Initialize mutex in the given class. Must precede any other
+ * ww_mutex_* operations. When done, mutex must be destroyed
+ * with ww_mutex_destroy.
+ */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{
@@ -187,6 +194,13 @@ ww_mutex_init(struct ww_mutex *mutex, st
#endif
}
+/*
+ * ww_mutex_destroy(mutex)
+ *
+ * Destroy mutex initialized by ww_mutex_init. Caller must not
+ * use it with any other ww_mutex_* operations except after
+ * reinitializing with ww_mutex_init.
+ */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{
@@ -205,9 +219,15 @@ ww_mutex_destroy(struct ww_mutex *mutex)
}
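
As a usage sketch (not part of this commit), the init/destroy
lifecycle the comments above describe looks like this; sc, sc_lock,
and my_class are hypothetical names, and DEFINE_WW_CLASS is assumed
to be provided by the shim's <linux/ww_mutex.h> as in Linux:

        DEFINE_WW_CLASS(my_class);      /* shared wait/wound class */

        /* attach: must precede any other ww_mutex_* operation */
        ww_mutex_init(&sc->sc_lock, &my_class);

        /* ... ww_mutex_lock/ww_mutex_unlock et al. ... */

        /* detach: no further use except after ww_mutex_init again */
        ww_mutex_destroy(&sc->sc_lock);
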
/*
- * XXX WARNING: This returns true if it is locked by ANYONE. Does not
- * mean `Do I hold this lock?' (answering which really requires an
- * acquire context).
+ * ww_mutex_is_locked(mutex)
+ *
+ * True if anyone holds mutex locked at the moment, false if not.
+ * Answer is stale as soon as it is returned unless mutex is
+ * held by the caller.
+ *
+ * XXX WARNING: This returns true if it is locked by ANYONE. Does
+ * not mean `Do I hold this lock?' (answering which really
+ * requires an acquire context).
*/
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
@@ -233,33 +253,78 @@ ww_mutex_is_locked(struct ww_mutex *mute
return locked;
}
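
A short sketch of sound versus unsound use of this predicate, per the
warning above (obj is a hypothetical name):

        /* sound: caller holds obj->lock, so the answer cannot go stale */
        KASSERT(ww_mutex_is_locked(&obj->lock));

        /*
         * unsound: anyone may lock or unlock obj->lock between this
         * check and whatever is done with the answer.
         */
        if (ww_mutex_is_locked(&obj->lock))
                do_something(obj);      /* hypothetical */
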
+/*
+ * ww_mutex_state_wait(mutex, state)
+ *
+ * Wait for mutex, which must be in the given state, to transition
+ * to another state. Uninterruptible; never fails.
+ *
+ * Caller must hold mutex's internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{
+ KASSERT(mutex_owned(&mutex->wwm_lock));
KASSERT(mutex->wwm_state == state);
do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
while (mutex->wwm_state == state);
}
+/*
+ * ww_mutex_state_wait_sig(mutex, state)
+ *
+ * Wait for mutex, which must be in the given state, to transition
+ * to another state, or fail if interrupted by a signal. Return 0
+ * on success, -EINTR if interrupted by a signal.
+ *
+ * Caller must hold mutex's internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
int ret;
+ KASSERT(mutex_owned(&mutex->wwm_lock));
KASSERT(mutex->wwm_state == state);
do {
/* XXX errno NetBSD->Linux */
ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- if (ret)
+ if (ret) {
+ KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+ "ret=%d", ret);
+ ret = -EINTR;
break;
+ }
} while (mutex->wwm_state == state);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
+/*
+ * ww_mutex_lock_wait(mutex, ctx)
+ *
+ * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ * by another thread with an acquire context, wait to acquire
+ * mutex. While waiting, record ctx in the tree of waiters. Does
+ * not update the mutex state otherwise.
+ *
+ * Caller must not already hold mutex. Caller must hold mutex's
+ * internal lock. Uninterruptible; never fails.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -291,6 +356,22 @@ ww_mutex_lock_wait(struct ww_mutex *mute
rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}
+/*
+ * ww_mutex_lock_wait_sig(mutex, ctx)
+ *
+ * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ * by another thread with an acquire context, wait to acquire
+ * mutex and return 0, or return -EINTR if interrupted by a
+ * signal. While waiting, record ctx in the tree of waiters.
+ * Does not update the mutex state otherwise.
+ *
+ * Caller must not already hold mutex. Caller must hold mutex's
+ * internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -318,18 +399,31 @@ ww_mutex_lock_wait_sig(struct ww_mutex *
do {
/* XXX errno NetBSD->Linux */
ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- if (ret)
+ if (ret) {
+ KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+ "ret=%d", ret);
+ ret = -EINTR;
goto out;
+ }
} while (!(((mutex->wwm_state == WW_CTX) ||
(mutex->wwm_state == WW_WANTOWN)) &&
(mutex->wwm_u.ctx == ctx)));
out: rb_tree_remove_node(&mutex->wwm_waiters, ctx);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
+/*
+ * ww_mutex_lock_noctx(mutex)
+ *
+ * Acquire mutex without an acquire context. Caller must not
+ * already hold the mutex. Uninterruptible; never fails.
+ *
+ * May sleep.
+ *
+ * Internal subroutine, implementing ww_mutex_lock(..., NULL).
+ */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{
@@ -364,6 +458,18 @@ retry: switch (mutex->wwm_state) {
mutex_exit(&mutex->wwm_lock);
}
+/*
+ * ww_mutex_lock_noctx_sig(mutex)
+ *
+ * Acquire mutex without an acquire context and return 0, or fail
+ * and return -EINTR if interrupted by a signal. Caller must not
+ * already hold the mutex.
+ *
+ * May sleep.
+ *
+ * Internal subroutine, implementing
+ * ww_mutex_lock_interruptible(..., NULL).
+ */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
@@ -379,8 +485,10 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
- if (ret)
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
goto out;
+ }
goto retry;
case WW_CTX:
KASSERT(mutex->wwm_u.ctx != NULL);
@@ -390,8 +498,10 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
- if (ret)
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
goto out;
+ }
goto retry;
default:
panic("wait/wound mutex %p in bad state: %d",
@@ -402,12 +512,29 @@ retry: switch (mutex->wwm_state) {
WW_LOCKED(mutex);
ret = 0;
out: mutex_exit(&mutex->wwm_lock);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
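
As the comments note, a null acquire context degenerates to a plain
sleepable mutex; a minimal sketch (obj is hypothetical):

        ww_mutex_lock(&obj->lock, NULL);        /* never fails */
        /* ... critical section ... */
        ww_mutex_unlock(&obj->lock);

        /* interruptible flavor: only 0 or -EINTR with a null ctx */
        if (ww_mutex_lock_interruptible(&obj->lock, NULL) == -EINTR)
                return -EINTR;
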
+/*
+ * ww_mutex_lock(mutex, ctx)
+ *
+ * Lock the mutex and return 0, or fail if impossible.
+ *
+ * - If ctx is null, caller must not hold mutex, and ww_mutex_lock
+ * always succeeds and returns 0.
+ *
+ * - If ctx is nonnull, then:
+ * . Fail with -EALREADY if caller already holds mutex.
+ * . Fail with -EDEADLK if someone else holds mutex and waiting
+ * for it would create a cycle.
+ *
+ * May sleep.
+ */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
+ int ret;
/*
* We do not WW_WANTLOCK at the beginning because we may
@@ -419,7 +546,8 @@ ww_mutex_lock(struct ww_mutex *mutex, st
if (ctx == NULL) {
WW_WANTLOCK(mutex);
ww_mutex_lock_noctx(mutex);
- return 0;
+ ret = 0;
+ goto out;
}
KASSERTMSG((ctx->wwx_owner == curlwp),
@@ -466,8 +594,8 @@ retry: switch (mutex->wwm_state) {
* for objects whose locking order is determined by
* userland.
*/
- mutex_exit(&mutex->wwm_lock);
- return -EALREADY;
+ ret = -EALREADY;
+ goto out_unlock;
}
/*
@@ -484,8 +612,8 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
"ww mutex class mismatch: %p != %p",
ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
- mutex_exit(&mutex->wwm_lock);
- return -EDEADLK;
+ ret = -EDEADLK;
+ goto out_unlock;
}
/*
@@ -499,10 +627,31 @@ locked: KASSERT((mutex->wwm_state == WW_
KASSERT(mutex->wwm_u.ctx == ctx);
WW_LOCKED(mutex);
ctx->wwx_acquired++;
+ ret = 0;
+out_unlock:
mutex_exit(&mutex->wwm_lock);
- return 0;
+out: KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
+ "ret=%d", ret);
+ return ret;
}
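
The caller side of this contract, sketched with hypothetical obj and
ctx (a real caller would also unwind its other locks on -EDEADLK):

        ret = ww_mutex_lock(&obj->lock, ctx);
        if (ret == -EALREADY) {
                /* already held under ctx; do not lock or unlock again */
        } else if (ret == -EDEADLK) {
                /*
                 * Wounded: ditch all held locks, then wait with
                 * ww_mutex_lock_slow before starting over.
                 */
        }
        KASSERT(ret == 0 || ret == -EALREADY || ret == -EDEADLK);
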
+/*
+ * ww_mutex_lock_interruptible(mutex, ctx)
+ *
+ * Lock the mutex and return 0, or fail if impossible or
+ * interrupted.
+ *
+ * - If ctx is null, caller must not hold mutex;
+ *   ww_mutex_lock_interruptible returns 0 on success or -EINTR
+ *   if interrupted by a signal.
+ *
+ * - If ctx is nonnull, then:
+ * . Fail with -EALREADY if caller already holds mutex.
+ * . Fail with -EDEADLK if someone else holds mutex and waiting
+ * for it would create a cycle.
+ * . Fail with -EINTR if interrupted by a signal.
+ *
+ * May sleep.
+ */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -517,7 +666,9 @@ ww_mutex_lock_interruptible(struct ww_mu
if (ctx == NULL) {
WW_WANTLOCK(mutex);
- return ww_mutex_lock_noctx_sig(mutex);
+ ret = ww_mutex_lock_noctx_sig(mutex);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
+ goto out;
}
KASSERTMSG((ctx->wwx_owner == curlwp),
@@ -542,15 +693,19 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
goto retry;
case WW_CTX:
break;
case WW_WANTOWN:
ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
goto retry;
default:
panic("wait/wound mutex %p in bad state: %d",
@@ -568,8 +723,8 @@ retry: switch (mutex->wwm_state) {
* for objects whose locking order is determined by
* userland.
*/
- mutex_exit(&mutex->wwm_lock);
- return -EALREADY;
+ ret = -EALREADY;
+ goto out_unlock;
}
/*
@@ -586,8 +741,8 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
"ww mutex class mismatch: %p != %p",
ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
- mutex_exit(&mutex->wwm_lock);
- return -EDEADLK;
+ ret = -EDEADLK;
+ goto out_unlock;
}
/*
@@ -595,8 +750,10 @@ retry: switch (mutex->wwm_state) {
* when it is done or it realizes it needs to back off.
*/
ret = ww_mutex_lock_wait_sig(mutex, ctx);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
locked: KASSERT((mutex->wwm_state == WW_CTX) ||
(mutex->wwm_state == WW_WANTOWN));
@@ -604,10 +761,25 @@ locked: KASSERT((mutex->wwm_state == WW_
WW_LOCKED(mutex);
ctx->wwx_acquired++;
ret = 0;
-out: mutex_exit(&mutex->wwm_lock);
+out_unlock:
+ mutex_exit(&mutex->wwm_lock);
+out: KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
+ ret == -EINTR), "ret=%d", ret);
return ret;
}
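
The interruptible variant adds exactly one failure mode; a minimal
sketch under the same hypothetical names:

        ret = ww_mutex_lock_interruptible(&obj->lock, ctx);
        if (ret == -EINTR)
                return ret;     /* signal: unwind and let caller retry */
        if (ret == -EDEADLK) {
                /* back off as with ww_mutex_lock */
        }
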
+/*
+ * ww_mutex_lock_slow(mutex, ctx)
+ *
+ * Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
+ * after the caller has ditched all its locks, wait for the owner
+ * of mutex to relinquish mutex before the caller can start over
+ * acquiring locks again.
+ *
+ * Uninterruptible; never fails.
+ *
+ * May sleep.
+ */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -674,6 +846,17 @@ locked: KASSERT((mutex->wwm_state == WW_
mutex_exit(&mutex->wwm_lock);
}
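
The backoff dance this comment describes, sketched for two mutexes;
a, b, ctx, and my_class are hypothetical, and a full implementation
would loop, since retaking a can fail with -EDEADLK again:

        ww_acquire_init(&ctx, &my_class);
        ww_mutex_lock(&a->lock, &ctx);
        ret = ww_mutex_lock(&b->lock, &ctx);
        if (ret == -EDEADLK) {
                /* lost the tiebreak: ditch everything held so far */
                ww_mutex_unlock(&a->lock);
                /* uninterruptible wait for the winner; never fails */
                ww_mutex_lock_slow(&b->lock, &ctx);
                /* start over, now with b already held */
                ww_mutex_lock(&a->lock, &ctx);
        }
        ww_acquire_done(&ctx);
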
+/*
+ * ww_mutex_lock_slow_interruptible(mutex, ctx)
+ *
+ * Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
+ * after the caller has ditched all its locks, wait for the owner
+ * of mutex to relinquish mutex before the caller can start over
+ * acquiring locks again, or fail with -EINTR if interrupted by a
+ * signal.
+ *
+ * May sleep.
+ */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
struct ww_acquire_ctx *ctx)
@@ -683,8 +866,11 @@ ww_mutex_lock_slow_interruptible(struct
WW_WANTLOCK(mutex);
ASSERT_SLEEPABLE();
- if (ctx == NULL)
- return ww_mutex_lock_noctx_sig(mutex);
+ if (ctx == NULL) {
+ ret = ww_mutex_lock_noctx_sig(mutex);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
+ goto out;
+ }
KASSERTMSG((ctx->wwx_owner == curlwp),
"ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
@@ -709,15 +895,19 @@ retry: switch (mutex->wwm_state) {
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
goto retry;
case WW_CTX:
break;
case WW_WANTOWN:
ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
goto retry;
default:
panic("wait/wound mutex %p in bad state: %d",
@@ -734,8 +924,10 @@ retry: switch (mutex->wwm_state) {
* wake us when it's done.
*/
ret = ww_mutex_lock_wait_sig(mutex, ctx);
- if (ret)
- goto out;
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
+ goto out_unlock;
+ }
locked: KASSERT((mutex->wwm_state == WW_CTX) ||
(mutex->wwm_state == WW_WANTOWN));
@@ -743,10 +935,18 @@ locked: KASSERT((mutex->wwm_state == WW_
WW_LOCKED(mutex);
ctx->wwx_acquired++;
ret = 0;
-out: mutex_exit(&mutex->wwm_lock);
+out_unlock:
+ mutex_exit(&mutex->wwm_lock);
+out: KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
+/*
+ * ww_mutex_trylock(mutex)
+ *
+ * Try to acquire mutex and return 1, but if it can't be done
+ * immediately, return 0.
+ */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
@@ -776,6 +976,17 @@ ww_mutex_trylock(struct ww_mutex *mutex)
return ret;
}
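
Typical nonblocking use of the trylock path (obj is hypothetical):

        if (ww_mutex_trylock(&obj->lock)) {
                /* acquired without blocking */
                /* ... critical section ... */
                ww_mutex_unlock(&obj->lock);
        } else {
                /* contended: fall back to ww_mutex_lock with a ctx */
        }
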
+/*
+ * ww_mutex_unlock_release(mutex)
+ *
+ * Decrement the number of mutexes acquired in the current locking
+ * context of mutex, which must be held by the caller and in
+ * WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
+ * Caller must hold the internal lock of mutex, and is responsible
+ * for notifying waiters.
+ *
+ * Internal subroutine.
+ */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{
@@ -793,6 +1004,11 @@ ww_mutex_unlock_release(struct ww_mutex
mutex->wwm_u.ctx = NULL;
}
+/*
+ * ww_mutex_unlock(mutex)
+ *
+ * Release mutex and wake the next caller waiting, if any.
+ */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{