Instead of using CMM_ACCESS_ONCE() with memory barriers, use __atomic builtins with relaxed memory ordering to implement CMM_LOAD_SHARED() and CMM_STORE_SHARED().
Signed-off-by: Ondřej Surý <ond...@sury.org>
---
 include/urcu/system.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/urcu/system.h b/include/urcu/system.h
index faae390..99e7443 100644
--- a/include/urcu/system.h
+++ b/include/urcu/system.h
@@ -26,7 +26,7 @@
  * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come
  * before the load.
  */
-#define _CMM_LOAD_SHARED(p)	CMM_ACCESS_ONCE(p)
+#define _CMM_LOAD_SHARED(p)	__atomic_load_n(&(p), __ATOMIC_RELAXED)
 
 /*
  * Load a data from shared memory, doing a cache flush if required.
@@ -42,7 +42,7 @@
  * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should
  * follow the store.
  */
-#define _CMM_STORE_SHARED(x, v)	__extension__ ({ CMM_ACCESS_ONCE(x) = (v); })
+#define _CMM_STORE_SHARED(x, v)	__atomic_store_n(&(x), (v), __ATOMIC_RELAXED)
 
 /*
  * Store v into x, where x is located in shared memory. Performs the
@@ -51,9 +51,8 @@
 #define CMM_STORE_SHARED(x, v)		\
 	__extension__			\
 	({				\
-		__typeof__(x) _v = _CMM_STORE_SHARED(x, v);	\
+		_CMM_STORE_SHARED(x, v);	\
 		cmm_smp_wmc();		\
-		_v = _v;	/* Work around clang "unused result" */ \
 	})
 
 #endif /* _URCU_SYSTEM_H */
-- 
2.39.2

_______________________________________________
lttng-dev mailing list
lttng-dev@lists.lttng.org
https://lists.lttng.org/cgi-bin/mailman/listinfo/lttng-dev