Author: mjg
Date: Wed Nov 4 21:18:08 2020
New Revision: 367341
URL: https://svnweb.freebsd.org/changeset/base/367341

Log:
  rms: fixup concurrent writer handling and add more features

  Previously the code had one wait channel for all pending writers. This
  could result in a buggy scenario where, after a writer that switched the
  lock mode from readers to writers goes off CPU, another writer queues
  itself and then the last reader wakes up the latter instead of the former.
  Use a separate channel.

  While here, add features to reliably detect whether curthread has the
  lock write-owned. This will be used by ZFS.

Modified:
  head/sys/kern/kern_rmlock.c
  head/sys/sys/_rmlock.h
  head/sys/sys/rmlock.h

Modified: head/sys/kern/kern_rmlock.c
==============================================================================
--- head/sys/kern/kern_rmlock.c	Wed Nov 4 20:15:14 2020	(r367340)
+++ head/sys/kern/kern_rmlock.c	Wed Nov 4 21:18:08 2020	(r367341)
@@ -878,10 +878,15 @@ db_show_rm(const struct lock_object *lock)
  * problem at some point. The easiest way to lessen it is to provide a bitmap.
  */
 
+#define	RMS_NOOWNER	((void *)0x1)
+#define	RMS_TRANSIENT	((void *)0x2)
+#define	RMS_FLAGMASK	0xf
+
 void
 rms_init(struct rmslock *rms, const char *name)
 {
 
+	rms->owner = RMS_NOOWNER;
 	rms->writers = 0;
 	rms->readers = 0;
 	mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
@@ -922,6 +927,7 @@ rms_rlock(struct rmslock *rms)
 {
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+	MPASS(atomic_load_ptr(&rms->owner) != curthread);
 
 	critical_enter();
 	zpcpu_set_protected(rms->readers_influx, 1);
@@ -941,6 +947,8 @@ int
 rms_try_rlock(struct rmslock *rms)
 {
 
+	MPASS(atomic_load_ptr(&rms->owner) != curthread);
+
 	critical_enter();
 	zpcpu_set_protected(rms->readers_influx, 1);
 	__compiler_membar();
@@ -1054,23 +1062,33 @@ rms_wlock(struct rmslock *rms)
 {
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
+	MPASS(atomic_load_ptr(&rms->owner) != curthread);
 
 	mtx_lock(&rms->mtx);
 	rms->writers++;
 	if (rms->writers > 1) {
-		msleep(&rms->writers, &rms->mtx, (PUSER - 1) | PDROP,
+		msleep(&rms->owner, &rms->mtx, (PUSER - 1),
 		    mtx_name(&rms->mtx), 0);
 		MPASS(rms->readers == 0);
-		return;
+		KASSERT(rms->owner == RMS_TRANSIENT,
+		    ("%s: unexpected owner value %p\n", __func__,
+		    rms->owner));
+		goto out_grab;
 	}
 
+	KASSERT(rms->owner == RMS_NOOWNER,
+	    ("%s: unexpected owner value %p\n", __func__, rms->owner));
+
 	rms_wlock_switch(rms);
 
-	if (rms->readers > 0)
-		msleep(&rms->writers, &rms->mtx, (PUSER - 1) | PDROP,
+	if (rms->readers > 0) {
+		msleep(&rms->writers, &rms->mtx, (PUSER - 1),
 		    mtx_name(&rms->mtx), 0);
-	else
-		mtx_unlock(&rms->mtx);
+	}
+
+out_grab:
+	rms->owner = curthread;
+	mtx_unlock(&rms->mtx);
 
 	MPASS(rms->readers == 0);
 }
@@ -1079,12 +1097,27 @@ rms_wunlock(struct rmslock *rms)
 {
 
 	mtx_lock(&rms->mtx);
+	KASSERT(rms->owner == curthread,
+	    ("%s: unexpected owner value %p\n", __func__, rms->owner));
 	MPASS(rms->writers >= 1);
 	MPASS(rms->readers == 0);
 	rms->writers--;
-	if (rms->writers > 0)
-		wakeup_one(&rms->writers);
-	else
+	if (rms->writers > 0) {
+		wakeup_one(&rms->owner);
+		rms->owner = RMS_TRANSIENT;
+	} else {
 		wakeup(&rms->readers);
+		rms->owner = RMS_NOOWNER;
+	}
 	mtx_unlock(&rms->mtx);
+}
+
+void
+rms_unlock(struct rmslock *rms)
+{
+
+	if (rms_wowned(rms))
+		rms_wunlock(rms);
+	else
+		rms_runlock(rms);
 }
Modified: head/sys/sys/_rmlock.h
==============================================================================
--- head/sys/sys/_rmlock.h	Wed Nov 4 20:15:14 2020	(r367340)
+++ head/sys/sys/_rmlock.h	Wed Nov 4 21:18:08 2020	(r367341)
@@ -72,6 +72,7 @@ struct rm_priotracker {
 
 struct rmslock {
 	struct mtx mtx;
+	struct thread *owner;
 	int	writers;
 	int	readers;
 	int	*readers_pcpu;

Modified: head/sys/sys/rmlock.h
==============================================================================
--- head/sys/sys/rmlock.h	Wed Nov 4 20:15:14 2020	(r367340)
+++ head/sys/sys/rmlock.h	Wed Nov 4 21:18:08 2020	(r367341)
@@ -140,16 +140,33 @@ int	rms_try_rlock(struct rmslock *rms);
 void	rms_runlock(struct rmslock *rms);
 void	rms_wlock(struct rmslock *rms);
 void	rms_wunlock(struct rmslock *rms);
+void	rms_unlock(struct rmslock *rms);
 
+static inline int
+rms_wowned(struct rmslock *rms)
+{
+
+	return (rms->owner == curthread);
+}
+
 /*
- * Writers are not explicitly tracked, thus until that changes the best we can
- * do is indicate the lock is taken for writing by *someone*.
+ * Only valid to call if you hold the lock in some manner.
  */
 static inline int
-rms_wowned(struct rmslock *rms)
+rms_rowned(struct rmslock *rms)
 {
 
-	return (rms->writers > 0);
+	return (rms->readers > 0);
+}
+
+static inline int
+rms_owned_any(struct rmslock *rms)
+{
+
+	if (rms_wowned(rms))
+		return (1);
+
+	return (rms_rowned(rms));
 }
 
 #endif /* _KERNEL */
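
Below is a minimal sketch of how a consumer (for example, ZFS-style code)
might use the new ownership helpers. The lock name and the wrapper functions
are invented for illustration; only the rms_* calls come from this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>

/* Hypothetical lock; the "zvol_" names are placeholders, not from the diff. */
static struct rmslock zvol_state_lock;

static void
zvol_locks_init(void)
{

	rms_init(&zvol_state_lock, "zvol_state");
}

static void
zvol_assert_locked(void)
{

	/* Only meaningful while curthread holds the lock in some mode. */
	MPASS(rms_owned_any(&zvol_state_lock));
}

static void
zvol_modify_state(void)
{

	/*
	 * rms_wowned() now compares the owner field against curthread, so
	 * this catches "not locked" as well as "write-locked by another
	 * thread", which the old writers-count check could not.
	 */
	KASSERT(rms_wowned(&zvol_state_lock),
	    ("%s: zvol_state_lock not write-owned", __func__));
	/* ... modify state protected by the lock ... */
}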
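
The new rms_unlock() builds on the same owner field: it calls rms_wowned()
and releases via rms_wunlock() or rms_runlock() accordingly. A sketch of the
pattern this enables, again with invented names and assuming the includes
from the previous sketch, is a single exit path for code that may hold the
lock in either mode.

static struct rmslock obj_lock;

static void
obj_update(bool need_write)
{

	if (need_write)
		rms_wlock(&obj_lock);
	else
		rms_rlock(&obj_lock);

	/* ... work that is the same for both lock modes ... */

	/*
	 * rms_unlock() checks write ownership through the owner field and
	 * releases in whichever mode curthread acquired the lock.
	 */
	rms_unlock(&obj_lock);
}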