Module Name: src
Committed By: ad
Date: Sun Oct 8 13:23:05 UTC 2023
Modified Files:
src/sys/kern: kern_condvar.c kern_sleepq.c kern_timeout.c
kern_turnstile.c sys_lwp.c sys_select.c
src/sys/rump/librump/rumpkern: sleepq.c
src/sys/sys: sleepq.h syncobj.h
Log Message:
Ensure that an LWP that has taken a legitimate wakeup never produces an
error code from sleepq_block().  With that guarantee in place, cv_signal()
can work as expected and only ever wake a single LWP.
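For illustration only (not part of the commit): a minimal sketch of the kind
of single-waiter hand-off this guarantee makes reliable.  The structure and
function names below are hypothetical; only the condvar(9)/mutex(9) calls are
real kernel interfaces.

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/intr.h>
#include <sys/mutex.h>

/* Hypothetical producer/consumer hand-off. */
struct example_queue {
	kmutex_t	eq_lock;
	kcondvar_t	eq_cv;
	int		eq_count;	/* items available */
};

static void
example_init(struct example_queue *eq)
{

	mutex_init(&eq->eq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&eq->eq_cv, "examq");
	eq->eq_count = 0;
}

static int
example_take(struct example_queue *eq)
{
	int error = 0;

	mutex_enter(&eq->eq_lock);
	while (eq->eq_count == 0) {
		/*
		 * With this change, a waiter woken by cv_signal()
		 * never sees an error return for that wakeup, so
		 * the signal cannot be "lost" to EINTR/ERESTART.
		 */
		error = cv_wait_sig(&eq->eq_cv, &eq->eq_lock);
		if (error != 0)
			break;
	}
	if (error == 0)
		eq->eq_count--;
	mutex_exit(&eq->eq_lock);
	return error;
}

static void
example_put(struct example_queue *eq)
{

	mutex_enter(&eq->eq_lock);
	eq->eq_count++;
	cv_signal(&eq->eq_cv);		/* wakes exactly one waiter */
	mutex_exit(&eq->eq_lock);
}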
To generate a diff of this commit:
cvs rdiff -u -r1.57 -r1.58 src/sys/kern/kern_condvar.c
cvs rdiff -u -r1.81 -r1.82 src/sys/kern/kern_sleepq.c
cvs rdiff -u -r1.78 -r1.79 src/sys/kern/kern_timeout.c
cvs rdiff -u -r1.52 -r1.53 src/sys/kern/kern_turnstile.c
cvs rdiff -u -r1.86 -r1.87 src/sys/kern/sys_lwp.c
cvs rdiff -u -r1.63 -r1.64 src/sys/kern/sys_select.c
cvs rdiff -u -r1.26 -r1.27 src/sys/rump/librump/rumpkern/sleepq.c
cvs rdiff -u -r1.39 -r1.40 src/sys/sys/sleepq.h
cvs rdiff -u -r1.16 -r1.17 src/sys/sys/syncobj.h
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/kern/kern_condvar.c
diff -u src/sys/kern/kern_condvar.c:1.57 src/sys/kern/kern_condvar.c:1.58
--- src/sys/kern/kern_condvar.c:1.57 Wed Oct 4 20:29:18 2023
+++ src/sys/kern/kern_condvar.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_condvar.c,v 1.57 2023/10/04 20:29:18 ad Exp $ */
+/* $NetBSD: kern_condvar.c,v 1.58 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008, 2019, 2020, 2023
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.57 2023/10/04 20:29:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.58 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -478,27 +478,13 @@ cv_wakeup_one(kcondvar_t *cv)
kmutex_t *mp;
lwp_t *l;
- /*
- * Keep waking LWPs until a non-interruptable waiter is found. An
- * interruptable waiter could fail to do something useful with the
- * wakeup due to an error return from cv_[timed]wait_sig(), and the
- * caller of cv_signal() may not expect such a scenario.
- *
- * This isn't a problem for non-interruptable waits (untimed and
- * timed), because if such a waiter is woken here it will not return
- * an error.
- */
mp = sleepq_hashlock(cv);
sq = CV_SLEEPQ(cv);
- while ((l = LIST_FIRST(sq)) != NULL) {
+ if (__predict_true((l = LIST_FIRST(sq)) != NULL)) {
KASSERT(l->l_sleepq == sq);
KASSERT(l->l_mutex == mp);
KASSERT(l->l_wchan == cv);
- if ((l->l_flag & LW_SINTR) == 0) {
- sleepq_remove(sq, l);
- break;
- } else
- sleepq_remove(sq, l);
+ sleepq_remove(sq, l, true);
}
mutex_spin_exit(mp);
}
@@ -539,7 +525,7 @@ cv_wakeup_all(kcondvar_t *cv)
KASSERT(l->l_sleepq == sq);
KASSERT(l->l_mutex == mp);
KASSERT(l->l_wchan == cv);
- sleepq_remove(sq, l);
+ sleepq_remove(sq, l, true);
}
mutex_spin_exit(mp);
}
Index: src/sys/kern/kern_sleepq.c
diff -u src/sys/kern/kern_sleepq.c:1.81 src/sys/kern/kern_sleepq.c:1.82
--- src/sys/kern/kern_sleepq.c:1.81 Sun Oct 8 11:12:47 2023
+++ src/sys/kern/kern_sleepq.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_sleepq.c,v 1.81 2023/10/08 11:12:47 ad Exp $ */
+/* $NetBSD: kern_sleepq.c,v 1.82 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.81 2023/10/08 11:12:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.82 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -105,10 +105,17 @@ sleepq_init(sleepq_t *sq)
/*
* sleepq_remove:
*
- * Remove an LWP from a sleep queue and wake it up.
+ * Remove an LWP from a sleep queue and wake it up.  Distinguish
+ * between deliberate wakeups (which carry valuable information) and
+ * "unsleep" (where an out-of-band action must be taken).
+ *
+ * For wakeup, convert any interruptable wait into a non-interruptable
+ * one before waking the LWP.  Otherwise, if only one LWP is awoken it
+ * could fail to do anything useful with the wakeup due to an error
+ * return, which the caller of e.g. cv_signal() may not expect.
*/
void
-sleepq_remove(sleepq_t *sq, lwp_t *l)
+sleepq_remove(sleepq_t *sq, lwp_t *l, bool wakeup)
{
struct schedstate_percpu *spc;
struct cpu_info *ci;
@@ -125,7 +132,7 @@ sleepq_remove(sleepq_t *sq, lwp_t *l)
l->l_syncobj = &sched_syncobj;
l->l_wchan = NULL;
l->l_sleepq = NULL;
- l->l_flag &= ~LW_SINTR;
+ l->l_flag &= wakeup ? ~(LW_SINTR|LW_CATCHINTR|LW_STIMO) : ~LW_SINTR;
ci = l->l_cpu;
spc = &ci->ci_schedstate;
@@ -409,7 +416,6 @@ sleepq_block(int timo, bool catch_p, syn
*/
flag = atomic_load_relaxed(&l->l_flag);
if (__predict_false((flag & mask) != 0)) {
- p = l->l_proc;
if ((flag & LW_CATCHINTR) == 0 && error != 0)
/* nothing */;
else if ((flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
@@ -422,6 +428,7 @@ sleepq_block(int timo, bool catch_p, syn
* on locks are non-interruptable and we will
* not recurse again.
*/
+ p = l->l_proc;
mutex_enter(p->p_lock);
if (((sig = sigispending(l, 0)) != 0 &&
(sigprop[sig] & SA_STOP) == 0) ||
@@ -456,7 +463,7 @@ sleepq_wake(sleepq_t *sq, wchan_t wchan,
next = LIST_NEXT(l, l_sleepchain);
if (l->l_wchan != wchan)
continue;
- sleepq_remove(sq, l);
+ sleepq_remove(sq, l, true);
if (--expected == 0)
break;
}
@@ -480,7 +487,7 @@ sleepq_unsleep(lwp_t *l, bool unlock)
KASSERT(lwp_locked(l, mp));
KASSERT(l->l_wchan != NULL);
- sleepq_remove(sq, l);
+ sleepq_remove(sq, l, false);
if (unlock) {
mutex_spin_exit(mp);
}
@@ -503,8 +510,12 @@ sleepq_timeout(void *arg)
*/
lwp_lock(l);
- if (l->l_wchan == NULL) {
- /* Somebody beat us to it. */
+ if (l->l_wchan == NULL || l->l_syncobj == &callout_syncobj) {
+ /*
+ * Somebody beat us to it, or the LWP is blocked in
+ * callout_halt() waiting for us to finish here. In
+ * neither case should the LWP produce EWOULDBLOCK.
+ */
lwp_unlock(l);
return;
}
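For reference only (not additional code from the commit): the two caller
styles under the new sleepq_remove() interface, with the sleep queue lock
assumed to be held as in the real callers above.

	/*
	 * Deliberate wakeup: LW_SINTR, LW_CATCHINTR and LW_STIMO are
	 * cleared, so the woken LWP will not produce an error code
	 * from sleepq_block() for this wakeup.
	 */
	sleepq_remove(sq, l, true);

	/*
	 * Unsleep (out-of-band action, e.g. sleepq_unsleep()): only
	 * LW_SINTR is cleared; any pending interrupt/timeout state
	 * stays visible to sleepq_block().
	 */
	sleepq_remove(sq, l, false);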
Index: src/sys/kern/kern_timeout.c
diff -u src/sys/kern/kern_timeout.c:1.78 src/sys/kern/kern_timeout.c:1.79
--- src/sys/kern/kern_timeout.c:1.78 Wed Oct 4 20:29:18 2023
+++ src/sys/kern/kern_timeout.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_timeout.c,v 1.78 2023/10/04 20:29:18 ad Exp $ */
+/* $NetBSD: kern_timeout.c,v 1.79 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2003, 2006, 2007, 2008, 2009, 2019, 2023
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.78 2023/10/04 20:29:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.79 2023/10/08 13:23:05 ad Exp $");
/*
* Timeouts are kept in a hierarchical timing wheel. The c_time is the
@@ -246,6 +246,16 @@ SDT_PROBE_DEFINE5(sdt, kernel, callout,
"unsigned"/*flags*/,
"bool"/*expired*/);
+syncobj_t callout_syncobj = {
+ .sobj_name = "callout",
+ .sobj_flag = SOBJ_SLEEPQ_SORTED,
+ .sobj_boostpri = PRI_KERNEL,
+ .sobj_unsleep = sleepq_unsleep,
+ .sobj_changepri = sleepq_changepri,
+ .sobj_lendpri = sleepq_lendpri,
+ .sobj_owner = syncobj_noowner,
+};
+
static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
@@ -612,8 +622,8 @@ callout_wait(callout_impl_t *c, void *in
cc->cc_ev_block.ev_count++;
nlocks = sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
- &sleep_syncobj, false);
- sleepq_block(0, false, &sleep_syncobj, nlocks);
+ &callout_syncobj, false);
+ sleepq_block(0, false, &callout_syncobj, nlocks);
}
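The new callout_syncobj is consumed only by the sleepq_timeout() check in
the kern_sleepq.c hunk above.  A hypothetical timeline, for illustration
only, of the race it guards against:

	/*
	 * LWP A                           CPU running callouts
	 * -----                           --------------------
	 * timed sleep via sleepq_block()
	 * receives a normal wakeup        sleepq_timeout(A) starts running
	 * calls callout_halt() to stop
	 * its own sleep timeout; the
	 * handler is still running, so
	 * A sleeps on callout_syncobj     sleepq_timeout() locks A, sees
	 *                                 l_syncobj == &callout_syncobj
	 *                                 and returns, so A is not marked
	 *                                 as timed out (no EWOULDBLOCK)
	 */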
/*
Index: src/sys/kern/kern_turnstile.c
diff -u src/sys/kern/kern_turnstile.c:1.52 src/sys/kern/kern_turnstile.c:1.53
--- src/sys/kern/kern_turnstile.c:1.52 Wed Oct 4 20:39:35 2023
+++ src/sys/kern/kern_turnstile.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_turnstile.c,v 1.52 2023/10/04 20:39:35 ad Exp $ */
+/* $NetBSD: kern_turnstile.c,v 1.53 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
@@ -61,7 +61,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.52 2023/10/04 20:39:35 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.53 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/lockdebug.h>
@@ -149,7 +149,7 @@ turnstile_remove(turnstile_t *ts, lwp_t
}
ts->ts_waiters[q]--;
- sleepq_remove(&ts->ts_sleepq[q], l);
+ sleepq_remove(&ts->ts_sleepq[q], l, true);
}
/*
Index: src/sys/kern/sys_lwp.c
diff -u src/sys/kern/sys_lwp.c:1.86 src/sys/kern/sys_lwp.c:1.87
--- src/sys/kern/sys_lwp.c:1.86 Wed Oct 4 20:29:18 2023
+++ src/sys/kern/sys_lwp.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: sys_lwp.c,v 1.86 2023/10/04 20:29:18 ad Exp $ */
+/* $NetBSD: sys_lwp.c,v 1.87 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020, 2023
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.86 2023/10/04 20:29:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.87 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -466,6 +466,7 @@ int
lwp_unpark(const lwpid_t *tp, const u_int ntargets)
{
u_int target;
+ kmutex_t *mp;
int error, s;
proc_t *p;
lwp_t *t;
@@ -484,11 +485,10 @@ lwp_unpark(const lwpid_t *tp, const u_in
KASSERT(lwp_locked(t, NULL));
if (__predict_true(t->l_syncobj == &lwp_park_syncobj)) {
- /*
- * As expected it's parked, so wake it up.
- * lwp_unsleep() will release the LWP lock.
- */
- lwp_unsleep(t, true);
+ /* As expected it's parked, so wake it up. */
+ mp = t->l_mutex;
+ sleepq_remove(NULL, t, true);
+ mutex_spin_exit(mp);
} else if (__predict_false(t->l_stat == LSZOMB)) {
lwp_unlock(t);
error = ESRCH;
Index: src/sys/kern/sys_select.c
diff -u src/sys/kern/sys_select.c:1.63 src/sys/kern/sys_select.c:1.64
--- src/sys/kern/sys_select.c:1.63 Wed Oct 4 20:29:18 2023
+++ src/sys/kern/sys_select.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: sys_select.c,v 1.63 2023/10/04 20:29:18 ad Exp $ */
+/* $NetBSD: sys_select.c,v 1.64 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009, 2010, 2019, 2020, 2023
@@ -85,7 +85,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.63 2023/10/04 20:29:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.64 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -807,7 +807,7 @@ selnotify(struct selinfo *sip, int event
*/
if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
KASSERT(l->l_wchan == sc);
- sleepq_unsleep(l, false);
+ sleepq_remove(l->l_sleepq, l, true);
}
}
mutex_spin_exit(lock);
Index: src/sys/rump/librump/rumpkern/sleepq.c
diff -u src/sys/rump/librump/rumpkern/sleepq.c:1.26 src/sys/rump/librump/rumpkern/sleepq.c:1.27
--- src/sys/rump/librump/rumpkern/sleepq.c:1.26 Thu Oct 5 19:28:30 2023
+++ src/sys/rump/librump/rumpkern/sleepq.c Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: sleepq.c,v 1.26 2023/10/05 19:28:30 ad Exp $ */
+/* $NetBSD: sleepq.c,v 1.27 2023/10/08 13:23:05 ad Exp $ */
/*
* Copyright (c) 2008 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.26 2023/10/05 19:28:30 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.27 2023/10/08 13:23:05 ad Exp $");
#include <sys/param.h>
#include <sys/condvar.h>
@@ -146,6 +146,13 @@ sleepq_unsleep(struct lwp *l, bool clean
}
}
+void
+sleepq_remove(sleepq_t *sq, struct lwp *l, bool wakeup)
+{
+
+ sleepq_unsleep(l, true);
+}
+
/*
* Thread scheduler handles priorities. Therefore no action here.
* (maybe do something if we're desperate?)
Index: src/sys/sys/sleepq.h
diff -u src/sys/sys/sleepq.h:1.39 src/sys/sys/sleepq.h:1.40
--- src/sys/sys/sleepq.h:1.39 Wed Oct 4 20:29:18 2023
+++ src/sys/sys/sleepq.h Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: sleepq.h,v 1.39 2023/10/04 20:29:18 ad Exp $ */
+/* $NetBSD: sleepq.h,v 1.40 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -50,7 +50,7 @@ struct syncobj;
typedef struct sleepq sleepq_t;
void sleepq_init(sleepq_t *);
-void sleepq_remove(sleepq_t *, lwp_t *);
+void sleepq_remove(sleepq_t *, lwp_t *, bool);
int sleepq_enter(sleepq_t *, lwp_t *, kmutex_t *);
void sleepq_enqueue(sleepq_t *, wchan_t, const char *,
const struct syncobj *, bool);
Index: src/sys/sys/syncobj.h
diff -u src/sys/sys/syncobj.h:1.16 src/sys/sys/syncobj.h:1.17
--- src/sys/sys/syncobj.h:1.16 Sat Sep 23 18:48:05 2023
+++ src/sys/sys/syncobj.h Sun Oct 8 13:23:05 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: syncobj.h,v 1.16 2023/09/23 18:48:05 ad Exp $ */
+/* $NetBSD: syncobj.h,v 1.17 2023/10/08 13:23:05 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2020, 2023 The NetBSD Foundation, Inc.
@@ -57,6 +57,7 @@ struct lwp *syncobj_noowner(wchan_t);
#define SOBJ_SLEEPQ_LIFO 0x04
#define SOBJ_SLEEPQ_NULL 0x08
+extern syncobj_t callout_syncobj;
extern syncobj_t cv_syncobj;
extern syncobj_t kpause_syncobj;
extern syncobj_t lwp_park_syncobj;