> Date: Mon, 23 Jan 2023 13:29:07 +0000
> From: Taylor R Campbell <campbell+netbsd-tech-k...@mumble.net>
>
> The attached patch series largely eliminates conditionals on
> __HAVE_ATOMIC_AS_MEMBAR by introducing wrappers
> membar_release_preatomic and membar_acquire_postatomic with the
> obvious definitions.
Sorry, wrong arguments to git format-patch. Let's try that again! (I was very careful to make sure I actually attached the file, so of course the file had the wrong contents -- we couldn't have me successfully sending the right patch the first time around, now, could we?)
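Before the patches themselves, here is the pattern the wrappers are aimed at: the release barrier goes immediately before an atomic reference-count decrement, and the acquire barrier immediately after the decrement that drops the last reference.  This is only an illustrative sketch, not code from the series; obj, obj_destroy, and the refcnt layout are made up for the example.

#include <sys/atomic.h>

struct obj {
	volatile unsigned int	refcnt;
	/* ... state protected by the reference ... */
};

void	obj_destroy(struct obj *);	/* hypothetical destructor */

void
obj_release(struct obj *obj)
{

	/* Order all prior accesses before the decrement below.  */
	membar_release_preatomic();
	if (atomic_dec_uint_nv(&obj->refcnt) > 0)
		return;

	/* Last reference: order the decrement before the teardown.  */
	membar_acquire_postatomic();
	obj_destroy(obj);
}

On __HAVE_ATOMIC_AS_MEMBAR machines both wrapper calls cost nothing; elsewhere they expand to membar_release/membar_acquire, exactly like the open-coded #ifndef blocks the patches remove.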
>From dfdd56f129263128de6abba5220409fcddaca975 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 12:48:58 +0000
Subject: [PATCH 1/6] membar_ops(3): New membar_release_preatomic,
 membar_acquire_postatomic.

These should replace __HAVE_ATOMIC_AS_MEMBAR conditionals.
---
 lib/libc/atomic/membar_ops.3 | 47 ++++++++++++++++++++++++++++--------
 sys/sys/atomic.h             |  8 ++++++
 2 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/lib/libc/atomic/membar_ops.3 b/lib/libc/atomic/membar_ops.3
index b9935c7104d6..e839b9bb3b3d 100644
--- a/lib/libc/atomic/membar_ops.3
+++ b/lib/libc/atomic/membar_ops.3
@@ -32,8 +32,10 @@
 .Os
 .Sh NAME
 .Nm membar_ops ,
-.Nm membar_acquire ,
 .Nm membar_release ,
+.Nm membar_release_preatomic ,
+.Nm membar_acquire ,
+.Nm membar_acquire_postatomic ,
 .Nm membar_producer ,
 .Nm membar_consumer ,
 .Nm membar_datadep_consumer ,
@@ -45,9 +47,13 @@
 .In sys/atomic.h
 .\"
 .Ft void
+.Fn membar_release "void"
+.Ft void
+.Fn membar_release_preatomic "void"
+.Ft void
 .Fn membar_acquire "void"
 .Ft void
-.Fn membar_release "void"
+.Fn membar_acquire_postatomic "void"
 .Ft void
 .Fn membar_producer "void"
 .Ft void
@@ -92,18 +98,28 @@
 not just C11 atomic operations on
 .Vt _Atomic\^ Ns -qualified objects.
 .Bl -tag -width abcd
-.It Fn membar_acquire
+.It Fn membar_acquire , Fn membar_acquire_postatomic
 Any load preceding
 .Fn membar_acquire
 will happen before all memory operations following it.
+Any load as part of an atomic read/modify/write preceding
+.Fn membar_acquire_postatomic
+will happen before all memory operations following it.
+.Pp
+.Fn membar_acquire_postatomic
+may be cheaper than
+.Fn membar_acquire
+on machines where atomic read/modify/write operations imply the
+ordering anyway, such as x86.
 .Pp
 A load followed by a
 .Fn membar_acquire
 implies a
 .Em load-acquire
 operation in the language of C11.
-.Fn membar_acquire
-should only be used after atomic read/modify/write, such as
+Normally you should only use
+.Fn membar_acquire_postatomic
+after an atomic read/modify/write, such as
 .Xr atomic_cas_uint 3 .
 For regular loads, instead of
 .Li "x = *p; membar_acquire()" ,
@@ -115,18 +131,29 @@ is typically used in code that implements locking primitives to
 ensure that a lock protects its data, and is typically paired with
 .Fn membar_release ;
 see below for an example.
-.It Fn membar_release
+.It Fn membar_release , Fn membar_release_preatomic
 All memory operations preceding
 .Fn membar_release
 will happen before any store that follows it.
+All memory operations preceding
+.Fn membar_release_preatomic
+will happen before any store as part of an atomic read/modify/write
+following it.
+.Pp
+.Fn membar_release_preatomic
+is cheaper than
+.Fn membar_release
+on machines where atomic read/modify/write operations imply the
+ordering anyway, such as x86.
 .Pp
 A
 .Fn membar_release
 followed by a store implies a
 .Em store-release
 operation in the language of C11.
-.Fn membar_release
-should only be used before atomic read/modify/write, such as
+Normally you should only use
+.Fn membar_release_preatomic
+before atomic read/modify/write, such as
 .Xr atomic_inc_uint 3 .
 For regular stores, instead of
 .Li "membar_release(); *p = x" ,
@@ -150,7 +177,7 @@ For example:
 /* thread A -- release a reference */
 obj->state.mumblefrotz = 42;
 KASSERT(valid(&obj->state));
-membar_release();
+membar_release_preatomic();
 atomic_dec_uint(&obj->refcnt);

 /*
@@ -159,7 +186,7 @@ atomic_dec_uint(&obj->refcnt);
  */
 while (atomic_cas_uint(&obj->refcnt, 0, -1) != 0)
 	continue;
-membar_acquire();
+membar_acquire_postatomic();
 KASSERT(valid(&obj->state));
 obj->state.mumblefrotz--;
 .Ed
diff --git a/sys/sys/atomic.h b/sys/sys/atomic.h
index 6c8af6d531e3..f935d27ff772 100644
--- a/sys/sys/atomic.h
+++ b/sys/sys/atomic.h
@@ -187,6 +187,14 @@ void membar_producer(void);
 void membar_consumer(void);
 void membar_sync(void);

+#ifdef __HAVE_ATOMIC_AS_MEMBAR
+#define	membar_release_preatomic()	__nothing
+#define	membar_acquire_postatomic()	__nothing
+#else
+#define	membar_release_preatomic()	membar_release()
+#define	membar_acquire_postatomic()	membar_acquire()
+#endif
+
 /*
  * Deprecated memory barriers
  */

>From e991599c6b6d6979e02d6da48f06207a3e4715c2 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 13:14:59 +0000
Subject: [PATCH 2/6] drm: Eliminate __HAVE_ATOMIC_AS_MEMBAR conditionals.

---
 sys/external/bsd/common/linux/linux_tasklet.c | 24 +++++--------------
 sys/external/bsd/drm2/include/linux/kref.h    | 24 +++++--------------
 2 files changed, 12 insertions(+), 36 deletions(-)

diff --git a/sys/external/bsd/common/linux/linux_tasklet.c b/sys/external/bsd/common/linux/linux_tasklet.c
index 310ce3469d5c..8d086b1acd9a 100644
--- a/sys/external/bsd/common/linux/linux_tasklet.c
+++ b/sys/external/bsd/common/linux/linux_tasklet.c
@@ -395,9 +395,7 @@ tasklet_disable_nosync(struct tasklet_struct *tasklet)
 	KASSERT(disablecount != 0);

 	/* Pairs with membar_release in __tasklet_enable. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
 }

 /*
@@ -515,9 +513,7 @@ tasklet_trylock(struct tasklet_struct *tasklet)
 	    state | TASKLET_RUNNING) != state);

 	/* Pairs with membar_release in tasklet_unlock. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	return true;
 }
@@ -539,9 +535,7 @@ tasklet_unlock(struct tasklet_struct *tasklet)
 	 * Pairs with membar_acquire in tasklet_trylock and with
 	 * atomic_load_acquire in tasklet_unlock_wait.
 	 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
 }

@@ -590,9 +584,7 @@ __tasklet_disable_sync_once(struct tasklet_struct *tasklet)
 	KASSERT(disablecount != 0);

 	/* Pairs with membar_release in __tasklet_enable_sync_once. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	/*
 	 * If it was zero, wait for it to finish running.  If it was
@@ -614,9 +606,7 @@ __tasklet_enable_sync_once(struct tasklet_struct *tasklet)
 	unsigned int disablecount;

 	/* Pairs with membar_acquire in __tasklet_disable_sync_once. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	/* Decrement the disable count. */
 	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
@@ -683,9 +673,7 @@ __tasklet_enable(struct tasklet_struct *tasklet)
 	 * Pairs with atomic_load_acquire in tasklet_softintr and with
 	 * membar_acquire in tasklet_disable.
 	 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	/* Decrement the disable count. */
 	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
diff --git a/sys/external/bsd/drm2/include/linux/kref.h b/sys/external/bsd/drm2/include/linux/kref.h
index f9d019143dfc..79477ccd6ad5 100644
--- a/sys/external/bsd/drm2/include/linux/kref.h
+++ b/sys/external/bsd/drm2/include/linux/kref.h
@@ -80,9 +80,7 @@ kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *))
 {
 	unsigned int old, new;

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	do {
 		old = atomic_load_relaxed(&kref->kr_count);
@@ -92,9 +90,7 @@ kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *))
 	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

 	if (new == 0) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		(*release)(kref);
 		return 1;
 	}
@@ -108,9 +104,7 @@ kref_put_lock(struct kref *kref, void (*release)(struct kref *),
 {
 	unsigned int old, new;

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	do {
 		old = atomic_load_relaxed(&kref->kr_count);
@@ -118,9 +112,7 @@ kref_put_lock(struct kref *kref, void (*release)(struct kref *),
 		if (old == 1) {
 			spin_lock(interlock);
 			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-				membar_acquire();
-#endif
+				membar_acquire_postatomic();
 				(*release)(kref);
 				return 1;
 			}
@@ -146,9 +138,7 @@ kref_put_mutex(struct kref *kref, void (*release)(struct kref *),
 {
 	unsigned int old, new;

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	do {
 		old = atomic_load_relaxed(&kref->kr_count);
@@ -156,9 +146,7 @@ kref_put_mutex(struct kref *kref, void (*release)(struct kref *),
 		if (old == 1) {
 			mutex_lock(interlock);
 			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-				membar_acquire();
-#endif
+				membar_acquire_postatomic();
 				(*release)(kref);
 				return 1;
 			}

>From 9c0911ca99f304a24815fd4a00467ec11b0345aa Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 13:16:52 +0000
Subject: [PATCH 3/6] kern: Eliminate most __HAVE_ATOMIC_AS_MEMBAR
 conditionals.

- Replace
	#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release/acquire
	#endif
  by membar_release_preatomic/acquire_postatomic.

- Leave comments on all the handful that remain explaining what I
  think it should be instead, or where I am confused and this is
  probably wrong but I'm not even sure what the code is trying to go
  for.
---
 sys/kern/kern_auth.c       |  8 ++------
 sys/kern/kern_descrip.c    | 30 ++++++++----------------------
 sys/kern/kern_lock.c       |  2 +-
 sys/kern/kern_mutex.c      |  8 +++-----
 sys/kern/kern_mutex_obj.c  |  8 ++------
 sys/kern/kern_rwlock.c     | 11 +++++++----
 sys/kern/kern_rwlock_obj.c |  8 ++------
 sys/kern/subr_copy.c       |  4 +---
 sys/kern/subr_ipi.c        | 24 ++++++------------------
 sys/kern/subr_pcq.c        |  3 ++-
 sys/kern/subr_pool.c       |  8 +-------
 sys/kern/sys_futex.c       |  8 ++------
 sys/kern/uipc_mbuf.c       |  8 ++------
 sys/kern/vfs_mount.c       |  8 ++------
 sys/kern/vfs_vnode.c       | 24 ++++++------------------
 15 files changed, 47 insertions(+), 115 deletions(-)

diff --git a/sys/kern/kern_auth.c b/sys/kern/kern_auth.c
index 08a132ca5a2b..1a4212039635 100644
--- a/sys/kern/kern_auth.c
+++ b/sys/kern/kern_auth.c
@@ -144,14 +144,10 @@ kauth_cred_free(kauth_cred_t cred)
 	KASSERT(cred->cr_refcnt > 0);
 	ASSERT_SLEEPABLE();

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&cred->cr_refcnt) > 0)
 		return;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	kauth_cred_hook(cred, KAUTH_CRED_FREE, NULL, NULL);
 	specificdata_fini(kauth_domain, &cred->cr_sd);
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index a1fa3bcdbdeb..29abf4ed4a23 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -398,7 +398,7 @@ fd_getfile(unsigned fd)
 		 */
 		atomic_inc_uint(&ff->ff_refcnt);
 #ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_enter();
+		membar_enter();	/* XXX ??? */
 #endif
 	}

@@ -453,9 +453,7 @@ fd_putfile(unsigned fd)
 	 * the file after it has been freed or recycled by another
 	 * CPU.
 	 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	/*
 	 * Be optimistic and start out with the assumption that no other
@@ -605,9 +603,7 @@ fd_close(unsigned fd)
 		 * waiting for other users of the file to drain.  Release
 		 * our reference, and wake up the closer.
 		 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		atomic_dec_uint(&ff->ff_refcnt);
 		cv_broadcast(&ff->ff_closing);
 		mutex_exit(&fdp->fd_lock);
@@ -642,9 +638,7 @@ fd_close(unsigned fd)
 		refcnt = --(ff->ff_refcnt);
 	} else {
 		/* Multi threaded. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		refcnt = atomic_dec_uint_nv(&ff->ff_refcnt);
 	}
 	if (__predict_false(refcnt != 0)) {
@@ -689,13 +683,9 @@ fd_close(unsigned fd)
 			cv_wait(&ff->ff_closing, &fdp->fd_lock);
 		}
 		atomic_and_uint(&ff->ff_refcnt, ~FR_CLOSING);
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 	} else {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		/* If no references, there must be no knotes. */
 		KASSERT(SLIST_EMPTY(&ff->ff_knlist));
 	}
@@ -1543,14 +1533,10 @@ fd_free(void)
 	KASSERT(fdp->fd_dtbuiltin.dt_nfiles == NDFILE);
 	KASSERT(fdp->fd_dtbuiltin.dt_link == NULL);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0)
 		return;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	/*
 	 * Close any files that the process holds open.
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 65eb42367820..065c24ecaaf2 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -306,7 +306,7 @@ _kernel_lock(int nlocks)
 	 * to occur before our store to ci_biglock_wanted above.
 	 */
 #ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_enter();
+	membar_enter();	/* XXX ??? */
 #endif

 #ifdef KERNEL_LOCK_BLAME
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index ec2077fc777c..313fbc67eb94 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -167,13 +167,11 @@ do {						\
  */
 #ifdef __HAVE_ATOMIC_AS_MEMBAR
 #define	MUTEX_MEMBAR_ENTER()
-#define	MUTEX_MEMBAR_ACQUIRE()
-#define	MUTEX_MEMBAR_RELEASE()
 #else
 #define	MUTEX_MEMBAR_ENTER()		membar_enter()
-#define	MUTEX_MEMBAR_ACQUIRE()		membar_acquire()
-#define	MUTEX_MEMBAR_RELEASE()		membar_release()
 #endif
+#define	MUTEX_MEMBAR_ACQUIRE()		membar_acquire_postatomic()
+#define	MUTEX_MEMBAR_RELEASE()		membar_release_preatomic()

 /*
  * For architectures that provide 'simple' mutexes: they provide a
@@ -247,7 +245,7 @@ MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
 	int rv;

 	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
-	MUTEX_MEMBAR_ENTER();
+	MUTEX_MEMBAR_ENTER();	/* XXX ??? */
 	return rv;
 }

diff --git a/sys/kern/kern_mutex_obj.c b/sys/kern/kern_mutex_obj.c
index 0fb2a656f14d..7782551ea250 100644
--- a/sys/kern/kern_mutex_obj.c
+++ b/sys/kern/kern_mutex_obj.c
@@ -155,15 +155,11 @@ mutex_obj_free(kmutex_t *lock)
 	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
 	     __func__, mo, mo->mo_refcnt);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
 		return false;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
 	mutex_destroy(&mo->mo_lock);
 	pool_cache_put(mutex_obj_cache, mo);
 	return true;
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index b45bdaec7103..d6fbcd2cc0a9 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -100,13 +100,11 @@ do {						\
 /*
  * Memory barriers.
  */
+#define	RW_MEMBAR_ACQUIRE()		membar_acquire_postatomic()
+#define	RW_MEMBAR_RELEASE()		membar_release_preatomic()
 #ifdef __HAVE_ATOMIC_AS_MEMBAR
-#define	RW_MEMBAR_ACQUIRE()
-#define	RW_MEMBAR_RELEASE()
 #define	RW_MEMBAR_PRODUCER()
 #else
-#define	RW_MEMBAR_ACQUIRE()		membar_acquire()
-#define	RW_MEMBAR_RELEASE()		membar_release()
 #define	RW_MEMBAR_PRODUCER()		membar_producer()
 #endif

@@ -614,6 +612,7 @@ rw_downgrade(krwlock_t *rw)
 	__USE(curthread);
 #endif

+	/* XXX pretty sure this should be membar_release */
 	RW_MEMBAR_PRODUCER();

 	for (owner = rw->rw_owner;; owner = next) {
@@ -713,6 +712,7 @@ rw_tryupgrade(krwlock_t *rw)
 		newown = curthread | RW_WRITE_LOCKED | (owner & ~RW_THREAD);
 		next = rw_cas(rw, owner, newown);
 		if (__predict_true(next == owner)) {
+			/*
+			 * XXX pretty sure either this is unnecessary
+			 * or it should be membar_acquire
+			 */
 			RW_MEMBAR_PRODUCER();
 			break;
 		}
diff --git a/sys/kern/kern_rwlock_obj.c b/sys/kern/kern_rwlock_obj.c
index 71dda4ef90de..d54652ba1f9d 100644
--- a/sys/kern/kern_rwlock_obj.c
+++ b/sys/kern/kern_rwlock_obj.c
@@ -145,15 +145,11 @@ rw_obj_free(krwlock_t *lock)
 	KASSERT(ro->ro_magic == RW_OBJ_MAGIC);
 	KASSERT(ro->ro_refcnt > 0);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) {
 		return false;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
 	rw_destroy(&ro->ro_lock);
 	pool_cache_put(rw_obj_cache, ro);
 	return true;
diff --git a/sys/kern/subr_copy.c b/sys/kern/subr_copy.c
index cf5768c29f64..fe7a22d9d6fe 100644
--- a/sys/kern/subr_copy.c
+++ b/sys/kern/subr_copy.c
@@ -412,9 +412,7 @@ ucas_critical_cpu_gate(void *arg __unused)
 	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
 	 * the following atomic_dec_uint into a store-release.
 	 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	atomic_dec_uint(&ucas_critical_pausing_cpus);

 	/*
diff --git a/sys/kern/subr_ipi.c b/sys/kern/subr_ipi.c
index 540a4716df2d..13b993f5a335 100644
--- a/sys/kern/subr_ipi.c
+++ b/sys/kern/subr_ipi.c
@@ -189,9 +189,7 @@ ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)

 	/* Mark as pending and return true if not previously marked. */
 	if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		atomic_or_32(&ci->ci_ipipend[i], bitm);
 		return true;
 	}
@@ -303,9 +301,7 @@ ipi_cpu_handler(void)
 			continue;
 		}
 		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		while ((bit = ffs(pending)) != 0) {
 			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
 			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];
@@ -341,9 +337,7 @@ ipi_msg_cpu_handler(void *arg __unused)
 		msg->func(msg->arg);

 		/* Ack the request. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		atomic_dec_uint(&msg->_pending);
 	}
 }
@@ -364,9 +358,7 @@ ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
 	KASSERT(curcpu() != ci);

 	msg->_pending = 1;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	put_msg(&ipi_mboxes[id], msg);
 	ipi_trigger(IPI_SYNCH_ID, ci);
@@ -390,9 +382,7 @@ ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)

 	local = !!kcpuset_isset(target, cpu_index(self));
 	msg->_pending = kcpuset_countset(target) - local;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	for (CPU_INFO_FOREACH(cii, ci)) {
 		cpuid_t id;
@@ -428,9 +418,7 @@ ipi_broadcast(ipi_msg_t *msg, bool skip_self)
 	KASSERT(kpreempt_disabled());

 	msg->_pending = ncpu - 1;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();

 	/* Broadcast IPIs for remote CPUs. */
 	for (CPU_INFO_FOREACH(cii, ci)) {
diff --git a/sys/kern/subr_pcq.c b/sys/kern/subr_pcq.c
index e55d60e5f0ed..f53c183c9282 100644
--- a/sys/kern/subr_pcq.c
+++ b/sys/kern/subr_pcq.c
@@ -117,6 +117,7 @@ pcq_put(pcq_t *pcq, void *item)
 	 * before we put it onto the list.
 	 */
 #ifndef __HAVE_ATOMIC_AS_MEMBAR
+	/* XXX pretty sure this should be membar_release */
 	membar_producer();
 #endif
 	pcq->pcq_items[op] = item;
@@ -185,7 +186,7 @@ pcq_get(pcq_t *pcq)
 	 * to pcq_items[] by pcq_put().
 	 */
 #ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_producer();
+	membar_producer();	/* XXX ??? */
 #endif
 	while (__predict_false(atomic_cas_32(&pcq->pcq_pc, v, nv) != v)) {
 		v = pcq->pcq_pc;
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 435077c9b6d3..0e889adb456e 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -2558,9 +2558,7 @@ pool_pcg_get(pcg_t *volatile *head, pcg_t **pcgp)
 		n = atomic_cas_ptr(head, o, __UNCONST(&pcg_dummy));
 		if (o == n) {
 			/* Fetch pointer to next item and then unlock. */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
 			membar_datadep_consumer(); /* alpha */
-#endif
 			n = atomic_load_relaxed(&o->pcg_next);
 			atomic_store_release(head, n);
 			break;
 		}
 	}
@@ -2592,9 +2590,7 @@ pool_pcg_trunc(pcg_t *volatile *head)
 		n = atomic_cas_ptr(head, o, NULL);
 		if (o == n) {
 			splx(s);
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
 			membar_datadep_consumer(); /* alpha */
-#endif
 			return o;
 		}
 	}
@@ -2621,9 +2617,7 @@ pool_pcg_put(pcg_t *volatile *head, pcg_t *pcg)
 			continue;
 		}
 		pcg->pcg_next = o;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		n = atomic_cas_ptr(head, o, pcg);
 		if (o == n) {
 			return count != SPINLOCK_BACKOFF_MIN;
diff --git a/sys/kern/sys_futex.c b/sys/kern/sys_futex.c
index 93708d2bd2a9..abbf6f263fc8 100644
--- a/sys/kern/sys_futex.c
+++ b/sys/kern/sys_futex.c
@@ -537,18 +537,14 @@ futex_rele(struct futex *f)
 		refcnt = atomic_load_relaxed(&f->fx_refcnt);
 		if (refcnt == 1)
 			goto trylast;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
 	return;

trylast:
 	mutex_enter(&futex_tab.lock);
 	if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		if (f->fx_on_tree) {
 			if (__predict_false(f->fx_shared))
 				rb_tree_remove_node(&futex_tab.oa, f);
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index abd512408f07..0ac8d411fd8f 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -1967,9 +1967,7 @@ m_ext_free(struct mbuf *m)
 	if (__predict_true(m->m_ext.ext_refcnt == 1)) {
 		refcnt = m->m_ext.ext_refcnt = 0;
 	} else {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt);
 	}

@@ -1986,9 +1984,7 @@ m_ext_free(struct mbuf *m)
 		/*
 		 * dropping the last reference
 		 */
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		if (!embedded) {
 			m->m_ext.ext_refcnt++; /* XXX */
 			m_ext_free(m->m_ext_ref);
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 60d49f31dc77..70d7e378e50f 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -302,15 +302,11 @@ void
 vfs_rele(struct mount *mp)
 {

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (__predict_true((int)atomic_dec_uint_nv(&mp->mnt_refcnt) > 0)) {
 		return;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	/*
 	 * Nothing else has visibility of the mount: we can now
diff --git a/sys/kern/vfs_vnode.c b/sys/kern/vfs_vnode.c
index 1e29bc5ade46..bfc25a596c3a 100644
--- a/sys/kern/vfs_vnode.c
+++ b/sys/kern/vfs_vnode.c
@@ -355,9 +355,7 @@ vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,

 	/* Open/close the gate for vcache_tryvget(). */
 	if (to == VS_LOADED) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
 	} else {
 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
@@ -401,9 +399,7 @@ vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)

 	/* Open/close the gate for vcache_tryvget(). */
 	if (to == VS_LOADED) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
 	} else {
 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
@@ -737,9 +733,7 @@ vtryrele(vnode_t *vp)
 {
 	u_int use, next;

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
 		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
 			return false;
 		}
@@ -837,9 +831,7 @@ retry:
 			break;
 		}
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
 		vnpanic(vp, "%s: bad ref count", __func__);
 	}
@@ -1004,9 +996,7 @@ out:
 			break;
 		}
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
 		/*
@@ -1479,9 +1469,7 @@ vcache_tryvget(vnode_t *vp)
 		next = atomic_cas_uint(&vp->v_usecount, use,
 		    (use + 1) | VUSECOUNT_VGET);
 		if (__predict_true(next == use)) {
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-			membar_acquire();
-#endif
+			membar_acquire_postatomic();
 			return 0;
 		}
 	}

>From 5b4b371c06584a4a871a08a57d9cf7be6a3cc39f Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 13:18:32 +0000
Subject: [PATCH 4/6] sys/net/if.c: Eliminate __HAVE_ATOMIC_AS_MEMBAR
 conditionals.

---
 sys/net/if.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/sys/net/if.c b/sys/net/if.c
index 332c8f02b42b..f0b450a9234c 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1810,14 +1810,10 @@ ifafree(struct ifaddr *ifa)
 	KASSERT(ifa != NULL);
 	KASSERTMSG(ifa->ifa_refcnt > 0, "ifa_refcnt=%d", ifa->ifa_refcnt);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&ifa->ifa_refcnt) != 0)
 		return;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	free(ifa, M_IFADDR);
 }

>From 81b55d19ce1b9d3831522ac9e7bfb7fdac2eed4a Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 13:18:44 +0000
Subject: [PATCH 5/6] npf: Eliminate __HAVE_ATOMIC_AS_MEMBAR conditionals.

---
 sys/net/npf/npf_nat.c      | 8 ++------
 sys/net/npf/npf_rproc.c    | 9 +++------
 sys/net/npf/npf_tableset.c | 8 ++------
 3 files changed, 7 insertions(+), 18 deletions(-)

diff --git a/sys/net/npf/npf_nat.c b/sys/net/npf/npf_nat.c
index 0887caf6a244..5d78876bcdca 100644
--- a/sys/net/npf/npf_nat.c
+++ b/sys/net/npf/npf_nat.c
@@ -279,15 +279,11 @@ npf_natpolicy_release(npf_natpolicy_t *np)
 {
 	KASSERT(atomic_load_relaxed(&np->n_refcnt) > 0);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&np->n_refcnt) != 0) {
 		return;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
 	KASSERT(LIST_EMPTY(&np->n_nat_list));
 	mutex_destroy(&np->n_lock);
 	kmem_free(np, sizeof(npf_natpolicy_t));
diff --git a/sys/net/npf/npf_rproc.c b/sys/net/npf/npf_rproc.c
index 24a368eca0e6..a14fa8d37d34 100644
--- a/sys/net/npf/npf_rproc.c
+++ b/sys/net/npf/npf_rproc.c
@@ -330,15 +330,12 @@ npf_rproc_release(npf_rproc_t *rp)
 {
 	KASSERT(atomic_load_relaxed(&rp->rp_refcnt) > 0);

-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&rp->rp_refcnt) != 0) {
 		return;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();
+
 	/* XXXintr */
 	for (unsigned i = 0; i < rp->rp_ext_count; i++) {
 		npf_ext_t *ext = rp->rp_ext[i];
diff --git a/sys/net/npf/npf_tableset.c b/sys/net/npf/npf_tableset.c
index c09a0919009b..e9d0bcbf11b6 100644
--- a/sys/net/npf/npf_tableset.c
+++ b/sys/net/npf/npf_tableset.c
@@ -160,14 +160,10 @@ npf_tableset_destroy(npf_tableset_t *ts)
 		if (t == NULL)
 			continue;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_release();
-#endif
+		membar_release_preatomic();
 		if (atomic_dec_uint_nv(&t->t_refcnt) > 0)
 			continue;
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-		membar_acquire();
-#endif
+		membar_acquire_postatomic();
 		npf_table_destroy(t);
 	}
 	kmem_free(ts, NPF_TABLESET_SIZE(ts->ts_nitems));

>From 2ca1130c200ed108311dcb627c9f32b57a5e321f Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastr...@netbsd.org>
Date: Mon, 23 Jan 2023 13:18:56 +0000
Subject: [PATCH 6/6] uvm: Eliminate __HAVE_ATOMIC_AS_MEMBAR conditionals.

---
 sys/uvm/uvm_aobj.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 235931750a2f..5a171ebeeb5a 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -604,16 +604,12 @@ uao_detach(struct uvm_object *uobj)
 	KASSERT(uobj->uo_refs > 0);
 	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
 	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_release();
-#endif
+	membar_release_preatomic();
 	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
 		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
 		return;
 	}
-#ifndef __HAVE_ATOMIC_AS_MEMBAR
-	membar_acquire();
-#endif
+	membar_acquire_postatomic();

 	/*
 	 * Remove the aobj from the global list.
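For completeness, the acquire side that pairs with the release sketch near the top of this mail, in the style of the vcache_tryvget() hunk: take a new reference with a CAS loop, then issue the post-atomic acquire barrier before touching the object.  Again just an illustrative sketch with made-up names (obj, obj_tryget), not code from the patches.

#include <sys/atomic.h>
#include <sys/errno.h>

struct obj {
	volatile unsigned int	refcnt;
	/* ... state published by the last release-side store ... */
};

int
obj_tryget(struct obj *obj)
{
	unsigned int use, next;

	for (use = atomic_load_relaxed(&obj->refcnt);; use = next) {
		if (use == 0)
			return ENOENT;	/* already being destroyed */
		next = atomic_cas_uint(&obj->refcnt, use, use + 1);
		if (next == use) {
			/* Order the CAS before later loads of *obj. */
			membar_acquire_postatomic();
			return 0;
		}
	}
}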