Module Name:	src
Committed By:	riastradh
Date:		Sat Apr  9 23:45:37 UTC 2022
Modified Files:
	src/sys/kern: kern_lwp.c kern_turnstile.c
	src/sys/sys: lwp.h

Log Message:
kern: Handle l_mutex with atomic_store_release, atomic_load_consume.

- Where the lock is held and known to be correct, no atomic.
- In loops to acquire the lock, use atomic_load_relaxed before we
  restart with atomic_load_consume.

Nix membar_exit.

(Who knows, using atomic_load_consume here might fix bugs on Alpha!)


To generate a diff of this commit:
cvs rdiff -u -r1.247 -r1.248 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.41 -r1.42 src/sys/kern/kern_turnstile.c
cvs rdiff -u -r1.214 -r1.215 src/sys/sys/lwp.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
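[Aside, not part of the commit:] The log message describes a release/consume publication pattern: the thread that changes which mutex covers an LWP publishes the new pointer with a release store, and readers that chase l_mutex load it with consume ordering and then re-check it (relaxed) once the mutex is actually held. For readers outside the NetBSD tree, the same dance can be sketched in portable C11. The sketch below is illustrative only and uses made-up names (struct obj, obj_setlock, obj_lock), with pthread mutexes standing in for the kernel's spin mutexes and memory_order_release/memory_order_consume standing in, roughly, for atomic_store_release()/atomic_load_consume() (most compilers strengthen consume to acquire, which is also a safe reading here).

/*
 * Illustrative C11 sketch only -- not NetBSD code.  "struct obj",
 * "obj_setlock", and "obj_lock" are hypothetical names.
 */
#include <pthread.h>
#include <stdatomic.h>

struct obj {
	pthread_mutex_t *_Atomic lock;	/* mutex currently covering the object */
};

/*
 * Publisher: called with the current lock held; installs a new lock
 * pointer.  The release store orders prior writes (including any
 * initialization of *newlock) before the new pointer becomes visible.
 */
static pthread_mutex_t *
obj_setlock(struct obj *o, pthread_mutex_t *newlock)
{
	/* We hold the old lock, so no ordering is needed to read it. */
	pthread_mutex_t *old = atomic_load_explicit(&o->lock,
	    memory_order_relaxed);

	atomic_store_explicit(&o->lock, newlock, memory_order_release);
	return old;		/* caller unlocks the old mutex */
}

/*
 * Consumer: acquire whichever lock currently covers the object.  Load
 * the pointer with consume ordering, take that mutex, then re-check the
 * pointer; once the mutex is held, its acquire ordering makes a relaxed
 * re-check sufficient.  If the pointer changed while we were blocking,
 * drop the stale mutex and retry.
 */
static pthread_mutex_t *
obj_lock(struct obj *o)
{
	pthread_mutex_t *m = atomic_load_explicit(&o->lock,
	    memory_order_consume);

	pthread_mutex_lock(m);
	while (atomic_load_explicit(&o->lock, memory_order_relaxed) != m) {
		pthread_mutex_unlock(m);
		m = atomic_load_explicit(&o->lock, memory_order_consume);
		pthread_mutex_lock(m);
	}
	return m;		/* caller later unlocks *m */
}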
Modified files:

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.247 src/sys/kern/kern_lwp.c:1.248
--- src/sys/kern/kern_lwp.c:1.247	Thu Mar 10 12:21:25 2022
+++ src/sys/kern/kern_lwp.c	Sat Apr  9 23:45:36 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.247 2022/03/10 12:21:25 riastradh Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.248 2022/04/09 23:45:36 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
@@ -217,7 +217,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.247 2022/03/10 12:21:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.248 2022/04/09 23:45:36 riastradh Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1565,8 +1565,7 @@ lwp_setlock(struct lwp *l, kmutex_t *mtx
 
 	KASSERT(mutex_owned(oldmtx));
 
-	membar_exit();
-	l->l_mutex = mtx;
+	atomic_store_release(&l->l_mutex, mtx);
 	return oldmtx;
 }
 
@@ -1582,8 +1581,7 @@ lwp_unlock_to(struct lwp *l, kmutex_t *m
 	KASSERT(lwp_locked(l, NULL));
 
 	old = l->l_mutex;
-	membar_exit();
-	l->l_mutex = mtx;
+	atomic_store_release(&l->l_mutex, mtx);
 	mutex_spin_exit(old);
 }
 
@@ -1593,9 +1591,9 @@ lwp_trylock(struct lwp *l)
 	kmutex_t *old;
 
 	for (;;) {
-		if (!mutex_tryenter(old = l->l_mutex))
+		if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
 			return 0;
-		if (__predict_true(l->l_mutex == old))
+		if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
 			return 1;
 		mutex_spin_exit(old);
 	}

Index: src/sys/kern/kern_turnstile.c
diff -u src/sys/kern/kern_turnstile.c:1.41 src/sys/kern/kern_turnstile.c:1.42
--- src/sys/kern/kern_turnstile.c:1.41	Wed Feb 23 21:54:41 2022
+++ src/sys/kern/kern_turnstile.c	Sat Apr  9 23:45:36 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_turnstile.c,v 1.41 2022/02/23 21:54:41 andvar Exp $	*/
+/*	$NetBSD: kern_turnstile.c,v 1.42 2022/04/09 23:45:36 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.41 2022/02/23 21:54:41 andvar Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.42 2022/04/09 23:45:36 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/lockdebug.h>
@@ -252,7 +252,7 @@ turnstile_lendpri(lwp_t *cur)
 		 * Because we already have another LWP lock (l->l_mutex) held,
 		 * we need to play a try lock dance to avoid deadlock.
 		 */
-		dolock = l->l_mutex != owner->l_mutex;
+		dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
 		if (l == owner || (dolock && !lwp_trylock(owner))) {
 			/*
 			 * The owner was changed behind us or trylock failed.
@@ -299,7 +299,7 @@ turnstile_lendpri(lwp_t *cur)
 		l = owner;
 	}
 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
-	if (cur->l_mutex != l->l_mutex) {
+	if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {
 		lwp_unlock(l);
 		lwp_lock(cur);
 	}
@@ -322,7 +322,8 @@ turnstile_unlendpri(turnstile_t *ts)
 
 	KASSERT(ts->ts_inheritor != NULL);
 	ts->ts_inheritor = NULL;
-	dolock = l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock;
+	dolock = (atomic_load_relaxed(&l->l_mutex) ==
+	    l->l_cpu->ci_schedstate.spc_lwplock);
 	if (dolock) {
 		lwp_lock(l);
 	}

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.214 src/sys/sys/lwp.h:1.215
--- src/sys/sys/lwp.h:1.214	Sat Apr  9 13:38:15 2022
+++ src/sys/sys/lwp.h	Sat Apr  9 23:45:37 2022
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.214 2022/04/09 13:38:15 riastradh Exp $	*/
+/*	$NetBSD: lwp.h,v 1.215 2022/04/09 23:45:37 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020
@@ -53,6 +53,7 @@ struct lwp;
 /* forward declare this for <machine/cpu.h> so it can get l_cpu. */
 static __inline struct cpu_info *lwp_getcpu(struct lwp *);
 #include <machine/cpu.h>		/* curcpu() and cpu_info */
+#include <sys/atomic.h>
 #ifdef _KERNEL_OPT
 #include "opt_kcov.h"
 #include "opt_kmsan.h"
@@ -407,16 +408,16 @@ void	lwp_whatis(uintptr_t, void (*)(cons
 static __inline void
 lwp_lock(lwp_t *l)
 {
-	kmutex_t *old = l->l_mutex;
+	kmutex_t *old = atomic_load_consume(&l->l_mutex);
 
 	/*
 	 * Note: mutex_spin_enter() will have posted a read barrier.
 	 * Re-test l->l_mutex.  If it has changed, we need to try again.
 	 */
 	mutex_spin_enter(old);
-	while (__predict_false(l->l_mutex != old)) {
+	while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
 		mutex_spin_exit(old);
-		old = l->l_mutex;
+		old = atomic_load_consume(&l->l_mutex);
 		mutex_spin_enter(old);
 	}
 }
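[Aside, not part of the commit:] The kern_turnstile.c hunk above mentions the "try lock dance" needed when another LWP lock is already held; lwp_trylock() in the kern_lwp.c diff is the primitive that implements it. In the hypothetical C11 sketch from earlier (same struct obj and pthread stand-ins, names made up), the corresponding step would look roughly like this: try to take whatever mutex the object currently points at, and only keep it if the pointer still matches once the mutex is held.

/*
 * Continues the earlier illustrative sketch (struct obj defined above);
 * mirrors the shape of lwp_trylock(), not its exact kernel semantics.
 */
#include <stdbool.h>

static bool
obj_trylock(struct obj *o)
{
	pthread_mutex_t *m;

	for (;;) {
		m = atomic_load_explicit(&o->lock, memory_order_consume);
		if (pthread_mutex_trylock(m) != 0)
			return false;	/* contended; caller backs off to avoid deadlock */
		if (atomic_load_explicit(&o->lock, memory_order_relaxed) == m)
			return true;	/* still the covering mutex; keep it */
		pthread_mutex_unlock(m);	/* pointer changed under us; retry */
	}
}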