Replace the custom assembly code in include/urcu/uatomic/ with the
__atomic builtins provided by any C11-compatible compiler.
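
As an illustration of the resulting semantics, here is a minimal usage
sketch (the refcount variable and the sketch() function are hypothetical
names, not part of this patch): uatomic_set() maps to a release store,
uatomic_read() to a consume load, xchg/cmpxchg and the *_return
primitives are sequentially consistent, and the plain
add/sub/inc/dec/and/or primitives are relaxed, to be paired with the
cmm_smp_mb__{before,after}_* fences where ordering is required.

	#include <urcu/uatomic.h>

	static unsigned long refcount;

	static void sketch(void)
	{
		/* Release store: __atomic_store_n(..., __ATOMIC_RELEASE). */
		uatomic_set(&refcount, 1);

		/* Relaxed increment; add the fences if ordering matters. */
		cmm_smp_mb__before_uatomic_inc();
		uatomic_inc(&refcount);
		cmm_smp_mb__after_uatomic_inc();

		/*
		 * Sequentially consistent compare-and-swap returning the old
		 * value, like the assembly versions it replaces.
		 */
		if (uatomic_cmpxchg(&refcount, 2, 0) == 2) {
			/* We observed 2 and stored 0. */
		}
	}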

Signed-off-by: Ondřej Surý <ond...@sury.org>
---
 include/Makefile.am            |  16 -
 include/urcu/uatomic.h         |  84 +++--
 include/urcu/uatomic/aarch64.h |  41 ---
 include/urcu/uatomic/alpha.h   |  32 --
 include/urcu/uatomic/arm.h     |  57 ---
 include/urcu/uatomic/gcc.h     |  46 ---
 include/urcu/uatomic/generic.h | 613 -------------------------------
 include/urcu/uatomic/hppa.h    |  10 -
 include/urcu/uatomic/ia64.h    |  41 ---
 include/urcu/uatomic/m68k.h    |  44 ---
 include/urcu/uatomic/mips.h    |  32 --
 include/urcu/uatomic/nios2.h   |  32 --
 include/urcu/uatomic/ppc.h     | 237 ------------
 include/urcu/uatomic/riscv.h   |  44 ---
 include/urcu/uatomic/s390.h    | 170 ---------
 include/urcu/uatomic/sparc64.h |  81 -----
 include/urcu/uatomic/tile.h    |  41 ---
 include/urcu/uatomic/x86.h     | 646 ---------------------------------
 18 files changed, 53 insertions(+), 2214 deletions(-)
 delete mode 100644 include/urcu/uatomic/aarch64.h
 delete mode 100644 include/urcu/uatomic/alpha.h
 delete mode 100644 include/urcu/uatomic/arm.h
 delete mode 100644 include/urcu/uatomic/gcc.h
 delete mode 100644 include/urcu/uatomic/generic.h
 delete mode 100644 include/urcu/uatomic/hppa.h
 delete mode 100644 include/urcu/uatomic/ia64.h
 delete mode 100644 include/urcu/uatomic/m68k.h
 delete mode 100644 include/urcu/uatomic/mips.h
 delete mode 100644 include/urcu/uatomic/nios2.h
 delete mode 100644 include/urcu/uatomic/ppc.h
 delete mode 100644 include/urcu/uatomic/riscv.h
 delete mode 100644 include/urcu/uatomic/s390.h
 delete mode 100644 include/urcu/uatomic/sparc64.h
 delete mode 100644 include/urcu/uatomic/tile.h
 delete mode 100644 include/urcu/uatomic/x86.h

diff --git a/include/Makefile.am b/include/Makefile.am
index ba1fe60..53a28fd 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -59,24 +59,8 @@ nobase_include_HEADERS = \
        urcu/syscall-compat.h \
        urcu/system.h \
        urcu/tls-compat.h \
-       urcu/uatomic/aarch64.h \
-       urcu/uatomic/alpha.h \
        urcu/uatomic_arch.h \
-       urcu/uatomic/arm.h \
-       urcu/uatomic/gcc.h \
-       urcu/uatomic/generic.h \
        urcu/uatomic.h \
-       urcu/uatomic/hppa.h \
-       urcu/uatomic/ia64.h \
-       urcu/uatomic/m68k.h \
-       urcu/uatomic/mips.h \
-       urcu/uatomic/nios2.h \
-       urcu/uatomic/ppc.h \
-       urcu/uatomic/riscv.h \
-       urcu/uatomic/s390.h \
-       urcu/uatomic/sparc64.h \
-       urcu/uatomic/tile.h \
-       urcu/uatomic/x86.h \
        urcu/urcu-bp.h \
        urcu/urcu-futex.h \
        urcu/urcu.h \
diff --git a/include/urcu/uatomic.h b/include/urcu/uatomic.h
index 2fb5fd4..0327810 100644
--- a/include/urcu/uatomic.h
+++ b/include/urcu/uatomic.h
@@ -22,37 +22,59 @@
 #define _URCU_UATOMIC_H
 
 #include <urcu/arch.h>
+#include <urcu/system.h>
 
-#if defined(URCU_ARCH_X86)
-#include <urcu/uatomic/x86.h>
-#elif defined(URCU_ARCH_PPC)
-#include <urcu/uatomic/ppc.h>
-#elif defined(URCU_ARCH_S390)
-#include <urcu/uatomic/s390.h>
-#elif defined(URCU_ARCH_SPARC64)
-#include <urcu/uatomic/sparc64.h>
-#elif defined(URCU_ARCH_ALPHA)
-#include <urcu/uatomic/alpha.h>
-#elif defined(URCU_ARCH_IA64)
-#include <urcu/uatomic/ia64.h>
-#elif defined(URCU_ARCH_ARM)
-#include <urcu/uatomic/arm.h>
-#elif defined(URCU_ARCH_AARCH64)
-#include <urcu/uatomic/aarch64.h>
-#elif defined(URCU_ARCH_MIPS)
-#include <urcu/uatomic/mips.h>
-#elif defined(URCU_ARCH_NIOS2)
-#include <urcu/uatomic/nios2.h>
-#elif defined(URCU_ARCH_TILE)
-#include <urcu/uatomic/tile.h>
-#elif defined(URCU_ARCH_HPPA)
-#include <urcu/uatomic/hppa.h>
-#elif defined(URCU_ARCH_M68K)
-#include <urcu/uatomic/m68k.h>
-#elif defined(URCU_ARCH_RISCV)
-#include <urcu/uatomic/riscv.h>
-#else
-#error "Cannot build: unrecognized architecture, see <urcu/arch.h>."
-#endif
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#define uatomic_set(addr, v) __atomic_store_n(addr, v, __ATOMIC_RELEASE)
+
+#define uatomic_read(addr) __atomic_load_n((addr), __ATOMIC_CONSUME)
+
+#define uatomic_xchg(addr, v) __atomic_exchange_n((addr), (v), __ATOMIC_SEQ_CST)
+
+#define uatomic_cmpxchg(addr, old, new) \
+	({									\
+		__typeof__(*(addr)) __old = old;				\
+		__atomic_compare_exchange_n(addr, &__old, new, 0,		\
+					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);\
+		__old;								\
+	})
+
+#define uatomic_add_return(addr, v) \
+       __atomic_add_fetch((addr), (v), __ATOMIC_SEQ_CST)
+
+#define uatomic_add(addr, v) \
+       (void)__atomic_add_fetch((addr), (v), __ATOMIC_RELAXED)
+
+#define uatomic_sub_return(addr, v) \
+       __atomic_sub_fetch((addr), (v), __ATOMIC_SEQ_CST)
+
+#define uatomic_sub(addr, v) \
+       (void)__atomic_sub_fetch((addr), (v), __ATOMIC_RELAXED)
+
+#define uatomic_and(addr, mask) \
+       (void)__atomic_and_fetch((addr), (mask), __ATOMIC_RELAXED)
+
+#define uatomic_or(addr, mask)                                         \
+       (void)__atomic_or_fetch((addr), (mask), __ATOMIC_RELAXED)
+
+#define uatomic_inc(addr) (void)__atomic_add_fetch((addr), 1, __ATOMIC_RELAXED)
+#define uatomic_dec(addr) (void)__atomic_sub_fetch((addr), 1, __ATOMIC_RELAXED)
+
+#define cmm_smp_mb__before_uatomic_and()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__after_uatomic_and()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__before_uatomic_or()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__after_uatomic_or()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__before_uatomic_add()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__after_uatomic_add()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
+#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()
+#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
+#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()
+#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
+#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
+
+#define cmm_smp_mb()                           cmm_mb()
 
 #endif /* _URCU_UATOMIC_H */
diff --git a/include/urcu/uatomic/aarch64.h b/include/urcu/uatomic/aarch64.h
deleted file mode 100644
index 58698ce..0000000
--- a/include/urcu/uatomic/aarch64.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_AARCH64_H
-#define _URCU_ARCH_UATOMIC_AARCH64_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009-2015 Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_AARCH64_H */
diff --git a/include/urcu/uatomic/alpha.h b/include/urcu/uatomic/alpha.h
deleted file mode 100644
index 5dceb90..0000000
--- a/include/urcu/uatomic/alpha.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
-#define _URCU_UATOMIC_ARCH_ALPHA_H
-
-/*
- * Atomic exchange operations for the Alpha architecture. Let GCC do it.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonz...@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
diff --git a/include/urcu/uatomic/arm.h b/include/urcu/uatomic/arm.h
deleted file mode 100644
index 95f32f3..0000000
--- a/include/urcu/uatomic/arm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_ARM_H
-#define _URCU_ARCH_UATOMIC_ARM_H
-
-/*
- * Atomics for ARM.  This approach is usable on kernels back to 2.6.15.
- *
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* xchg */
-
-/*
- * Based on [1], __sync_lock_test_and_set() is not a full barrier, but
- * instead only an acquire barrier. Given that uatomic_xchg() acts as
- * both release and acquire barriers, we therefore need to have our own
- * release barrier before this operation.
- *
- * [1] https://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
- */
-#define uatomic_xchg(addr, v)                          \
-       ({                                              \
-               cmm_smp_mb();                           \
-               __sync_lock_test_and_set(addr, v);      \
-       })
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_ARM_H */
diff --git a/include/urcu/uatomic/gcc.h b/include/urcu/uatomic/gcc.h
deleted file mode 100644
index 438e039..0000000
--- a/include/urcu/uatomic/gcc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_GCC_H
-#define _URCU_ARCH_UATOMIC_GCC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * If your platform doesn't have a full set of atomics, you will need
- * a separate uatomic_arch_*.h file for your architecture.  Otherwise,
- * just rely on the definitions in uatomic/generic.h.
- */
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_GCC_H */
diff --git a/include/urcu/uatomic/generic.h b/include/urcu/uatomic/generic.h
deleted file mode 100644
index c3762b0..0000000
--- a/include/urcu/uatomic/generic.h
+++ /dev/null
@@ -1,613 +0,0 @@
-#ifndef _URCU_UATOMIC_GENERIC_H
-#define _URCU_UATOMIC_GENERIC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- * Copyright (c) 2010      Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <stdint.h>
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef uatomic_set
-#define uatomic_set(addr, v)   ((void) CMM_STORE_SHARED(*(addr), (v)))
-#endif
-
-#ifndef uatomic_read
-#define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
-#endif
-
-#if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline, __noreturn__))
-void _uatomic_link_error(void)
-{
-#ifdef ILLEGAL_INSTR
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-#else
-       __builtin_trap();
-#endif
-}
-
-#else /* #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
-extern void _uatomic_link_error(void);
-#endif /* #else #if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR */
-
-/* cmpxchg */
-
-#ifndef uatomic_cmpxchg
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
-                               _new);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
-                               _new);
-#endif
-       case 4:
-               return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
-                               _new);
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
-                               _new);
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                     \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
-                                               caa_cast_long_keep_sign(old), \
-                                               caa_cast_long_keep_sign(_new),\
-                                               sizeof(*(addr))))
-
-
-/* uatomic_and */
-
-#ifndef uatomic_and
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val,
-                 int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               __sync_and_and_fetch_1((uint8_t *) addr, val);
-               return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               __sync_and_and_fetch_2((uint16_t *) addr, val);
-               return;
-#endif
-       case 4:
-               __sync_and_and_fetch_4((uint32_t *) addr, val);
-               return;
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               __sync_and_and_fetch_8((uint64_t *) addr, val);
-               return;
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v)                   \
-       (_uatomic_and((addr),                   \
-               caa_cast_long_keep_sign(v),     \
-               sizeof(*(addr))))
-#define cmm_smp_mb__before_uatomic_and()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_and()                cmm_barrier()
-
-#endif
-
-/* uatomic_or */
-
-#ifndef uatomic_or
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val,
-                int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               __sync_or_and_fetch_1((uint8_t *) addr, val);
-               return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               __sync_or_and_fetch_2((uint16_t *) addr, val);
-               return;
-#endif
-       case 4:
-               __sync_or_and_fetch_4((uint32_t *) addr, val);
-               return;
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               __sync_or_and_fetch_8((uint64_t *) addr, val);
-               return;
-#endif
-       }
-       _uatomic_link_error();
-       return;
-}
-
-#define uatomic_or(addr, v)                    \
-       (_uatomic_or((addr),                    \
-               caa_cast_long_keep_sign(v),     \
-               sizeof(*(addr))))
-#define cmm_smp_mb__before_uatomic_or()                cmm_barrier()
-#define cmm_smp_mb__after_uatomic_or()         cmm_barrier()
-
-#endif
-
-
-/* uatomic_add_return */
-
-#ifndef uatomic_add_return
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-               return __sync_add_and_fetch_1((uint8_t *) addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-               return __sync_add_and_fetch_2((uint16_t *) addr, val);
-#endif
-       case 4:
-               return __sync_add_and_fetch_4((uint32_t *) addr, val);
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-               return __sync_add_and_fetch_8((uint64_t *) addr, val);
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-
-#define uatomic_add_return(addr, v)                                        \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               uint8_t old;
-
-               do {
-                       old = uatomic_read((uint8_t *) addr);
-               } while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
-                               old, val));
-
-               return old;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               uint16_t old;
-
-               do {
-                       old = uatomic_read((uint16_t *) addr);
-               } while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
-                               old, val));
-
-               return old;
-       }
-#endif
-       case 4:
-       {
-               uint32_t old;
-
-               do {
-                       old = uatomic_read((uint32_t *) addr);
-               } while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
-                               old, val));
-
-               return old;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               uint64_t old;
-
-               do {
-                       old = uatomic_read((uint64_t *) addr);
-               } while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
-                               old, val));
-
-               return old;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#else /* #ifndef uatomic_cmpxchg */
-
-#ifndef uatomic_and
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               uint8_t old, oldt;
-
-               oldt = uatomic_read((uint8_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               uint16_t old, oldt;
-
-               oldt = uatomic_read((uint16_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
-               } while (oldt != old);
-       }
-#endif
-       case 4:
-       {
-               uint32_t old, oldt;
-
-               oldt = uatomic_read((uint32_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
-               } while (oldt != old);
-
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               uint64_t old, oldt;
-
-               oldt = uatomic_read((uint64_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v)                   \
-       (_uatomic_and((addr),                   \
-               caa_cast_long_keep_sign(v),     \
-               sizeof(*(addr))))
-#define cmm_smp_mb__before_uatomic_and()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_and()                cmm_barrier()
-
-#endif /* #ifndef uatomic_and */
-
-#ifndef uatomic_or
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               uint8_t old, oldt;
-
-               oldt = uatomic_read((uint8_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               uint16_t old, oldt;
-
-               oldt = uatomic_read((uint16_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       case 4:
-       {
-               uint32_t old, oldt;
-
-               oldt = uatomic_read((uint32_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
-               } while (oldt != old);
-
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               uint64_t old, oldt;
-
-               oldt = uatomic_read((uint64_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
-               } while (oldt != old);
-
-               return;
-       }
-#endif
-       }
-       _uatomic_link_error();
-}
-
-#define uatomic_or(addr, v)                    \
-       (_uatomic_or((addr),                    \
-               caa_cast_long_keep_sign(v),     \
-               sizeof(*(addr))))
-#define cmm_smp_mb__before_uatomic_or()                cmm_barrier()
-#define cmm_smp_mb__after_uatomic_or()         cmm_barrier()
-
-#endif /* #ifndef uatomic_or */
-
-#ifndef uatomic_add_return
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               uint8_t old, oldt;
-
-               oldt = uatomic_read((uint8_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint8_t *) addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               uint16_t old, oldt;
-
-               oldt = uatomic_read((uint16_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint16_t *) addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-       case 4:
-       {
-               uint32_t old, oldt;
-
-               oldt = uatomic_read((uint32_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint32_t *) addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               uint64_t old, oldt;
-
-               oldt = uatomic_read((uint64_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint64_t *) addr,
-                                               old, old + val);
-               } while (oldt != old);
-
-               return old + val;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_add_return(addr, v)                                        \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
-       case 1:
-       {
-               uint8_t old, oldt;
-
-               oldt = uatomic_read((uint8_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint8_t *) addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
-       case 2:
-       {
-               uint16_t old, oldt;
-
-               oldt = uatomic_read((uint16_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint16_t *) addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-       case 4:
-       {
-               uint32_t old, oldt;
-
-               oldt = uatomic_read((uint32_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint32_t *) addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               uint64_t old, oldt;
-
-               oldt = uatomic_read((uint64_t *) addr);
-               do {
-                       old = oldt;
-                       oldt = uatomic_cmpxchg((uint64_t *) addr,
-                                               old, val);
-               } while (oldt != old);
-
-               return old;
-       }
-#endif
-       }
-       _uatomic_link_error();
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#endif /* #else #ifndef uatomic_cmpxchg */
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#ifndef uatomic_add
-#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
-#define cmm_smp_mb__before_uatomic_add()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_add()                cmm_barrier()
-#endif
-
-#define uatomic_sub_return(addr, v)    \
-       uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
-#define uatomic_sub(addr, v)           \
-       uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
-#define cmm_smp_mb__before_uatomic_sub()       cmm_smp_mb__before_uatomic_add()
-#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()
-
-#ifndef uatomic_inc
-#define uatomic_inc(addr)              uatomic_add((addr), 1)
-#define cmm_smp_mb__before_uatomic_inc()       cmm_smp_mb__before_uatomic_add()
-#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()
-#endif
-
-#ifndef uatomic_dec
-#define uatomic_dec(addr)              uatomic_add((addr), -1)
-#define cmm_smp_mb__before_uatomic_dec()       cmm_smp_mb__before_uatomic_add()
-#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_UATOMIC_GENERIC_H */
diff --git a/include/urcu/uatomic/hppa.h b/include/urcu/uatomic/hppa.h
deleted file mode 100644
index 2102153..0000000
--- a/include/urcu/uatomic/hppa.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_HPPA_H
-#define _URCU_ARCH_UATOMIC_HPPA_H
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#define UATOMIC_HAS_ATOMIC_SHORT
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_HPPA_H */
diff --git a/include/urcu/uatomic/ia64.h b/include/urcu/uatomic/ia64.h
deleted file mode 100644
index b5db8cc..0000000
--- a/include/urcu/uatomic/ia64.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_IA64_H
-#define _URCU_ARCH_UATOMIC_IA64_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009-2015 Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_IA64_H */
diff --git a/include/urcu/uatomic/m68k.h b/include/urcu/uatomic/m68k.h
deleted file mode 100644
index 60b01c7..0000000
--- a/include/urcu/uatomic/m68k.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Atomic exchange operations for the m68k architecture. Let GCC do it.
- *
- * Copyright (c) 2017 Michael Jeanson <mjean...@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _URCU_ARCH_UATOMIC_M68K_H
-#define _URCU_ARCH_UATOMIC_M68K_H
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_M68K_H */
diff --git a/include/urcu/uatomic/mips.h b/include/urcu/uatomic/mips.h
deleted file mode 100644
index bd7ca7f..0000000
--- a/include/urcu/uatomic/mips.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_MIPS_H
-#define _URCU_UATOMIC_ARCH_MIPS_H
-
-/*
- * Atomic exchange operations for the MIPS architecture. Let GCC do it.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonz...@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_MIPS_H */
diff --git a/include/urcu/uatomic/nios2.h b/include/urcu/uatomic/nios2.h
deleted file mode 100644
index 5b3c303..0000000
--- a/include/urcu/uatomic/nios2.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_NIOS2_H
-#define _URCU_UATOMIC_ARCH_NIOS2_H
-
-/*
- * Atomic exchange operations for the NIOS2 architecture. Let GCC do it.
- *
- * Copyright (c) 2016 Marek Vasut <ma...@denx.de>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_NIOS2_H */
diff --git a/include/urcu/uatomic/ppc.h b/include/urcu/uatomic/ppc.h
deleted file mode 100644
index 0e672f5..0000000
--- a/include/urcu/uatomic/ppc.h
+++ /dev/null
@@ -1,237 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define ILLEGAL_INSTR  ".long  0xd00d00"
-
-/*
- * Providing sequential consistency semantic with respect to other
- * instructions for cmpxchg and add_return family of atomic primitives.
- *
- * This is achieved with:
- *   lwsync (prior loads can be reordered after following load)
- *   lwarx
- *   stwcx.
- *   test if success (retry)
- *   sync
- *
- * Explanation of the sequential consistency provided by this scheme
- * from Paul E. McKenney:
- *
- * The reason we can get away with the lwsync before is that if a prior
- * store reorders with the lwarx, then you have to store to the atomic
- * variable from some other CPU to detect it.
- *
- * And if you do that, the lwarx will lose its reservation, so the stwcx
- * will fail.  The atomic operation will retry, so that the caller won't be
- * able to see the misordering.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "stwcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "stdcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "cmpw %0,%3\n"          /* if load is not equal to */
-                       "bne 2f\n"              /* old, fail */
-                       "stwcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-               "2:\n"
-                               : "=&r"(old_val)
-                               : "r"(addr), "r"((unsigned int)_new),
-                                 "r"((unsigned int)old)
-                               : "memory", "cc");
-
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old_val;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "cmpd %0,%3\n"          /* if load is not equal to */
-                       "bne 2f\n"              /* old, fail */
-                       "stdcx. %2,0,%1\n"      /* else store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-               "2:\n"
-                               : "=&r"(old_val)
-                               : "r"(addr), "r"((unsigned long)_new),
-                                 "r"((unsigned long)old)
-                               : "memory", "cc");
-
-               return old_val;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                     \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
-                                               caa_cast_long_keep_sign(old), \
-                                               caa_cast_long_keep_sign(_new),\
-                                               sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "add %0,%2,%0\n"        /* add val to value loaded */
-                       "stwcx. %0,0,%1\n"      /* store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-
-               __asm__ __volatile__(
-                       LWSYNC_OPCODE
-               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-                       "add %0,%2,%0\n"        /* add val to value loaded */
-                       "stdcx. %0,0,%1\n"      /* store conditional */
-                       "bne- 1b\n"             /* retry if lost reservation */
-                       "sync\n"
-                               : "=&r"(result)
-                               : "r"(addr), "r"(val)
-                               : "memory", "cc");
-
-               return result;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__(ILLEGAL_INSTR);
-       return 0;
-}
-
-
-#define uatomic_add_return(addr, v)                                        \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
diff --git a/include/urcu/uatomic/riscv.h b/include/urcu/uatomic/riscv.h
deleted file mode 100644
index a6700e1..0000000
--- a/include/urcu/uatomic/riscv.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Atomic exchange operations for the RISC-V architecture. Let GCC do it.
- *
- * Copyright (c) 2018 Michael Jeanson <mjean...@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _URCU_ARCH_UATOMIC_RISCV_H
-#define _URCU_ARCH_UATOMIC_RISCV_H
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_RISCV_H */
diff --git a/include/urcu/uatomic/s390.h b/include/urcu/uatomic/s390.h
deleted file mode 100644
index 42f23e7..0000000
--- a/include/urcu/uatomic/s390.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _URCU_UATOMIC_ARCH_S390_H
-#define _URCU_UATOMIC_ARCH_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblu...@suse.de>
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define COMPILER_HAVE_SHORT_MEM_OPERAND
-#endif
-
-/*
- * MEMOP assembler operand rules:
- * - op refer to MEMOP_IN operand
- * - MEMOP_IN can expand to more than a single operand. Use it at the end of
- *   operand list only.
- */
-
-#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
-
-#define MEMOP_OUT(addr)        "=Q" (*(addr))
-#define MEMOP_IN(addr) "Q" (*(addr))
-#define MEMOP_REF(op)  #op             /* op refer to MEMOP_IN operand */
-
-#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-#define MEMOP_OUT(addr)        "=m" (*(addr))
-#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
-#define MEMOP_REF(op)  "0(" #op ")"    /* op refer to MEMOP_IN operand */
-
-#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-/*
- * The __hp() macro casts the void pointer @x to a pointer to a structure
- * containing an array of char of the specified size. This allows passing the
- * @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly. The @size parameter should be a constant to support
- * compilers such as clang which do not support VLA. Create typedefs because
- * C++ does not allow types be defined in casts.
- */
-
-typedef struct { char v[4]; } __hp_4;
-typedef struct { char v[8]; } __hp_8;
-
-#define __hp(size, x)  ((__hp_##size *)(x))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val;
-
-               __asm__ __volatile__(
-                       "0:     cs %0,%2," MEMOP_REF(%3) "\n"
-                       "       brc 4,0b\n"
-                       : "=&r" (old_val), MEMOP_OUT (__hp(4, addr))
-                       : "r" (val), MEMOP_IN (__hp(4, addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long old_val;
-
-               __asm__ __volatile__(
-                       "0:     csg %0,%2," MEMOP_REF(%3) "\n"
-                       "       brc 4,0b\n"
-                       : "=&r" (old_val), MEMOP_OUT (__hp(8, addr))
-                       : "r" (val), MEMOP_IN (__hp(8, addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#endif
-       default:
-               __asm__ __volatile__(".long     0xd00d00");
-       }
-
-       return 0;
-}
-
-#define uatomic_xchg(addr, v)                                              \
-       (__typeof__(*(addr))) _uatomic_exchange((addr),                     \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                              unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               unsigned int old_val = (unsigned int)old;
-
-               __asm__ __volatile__(
-                       "       cs %0,%2," MEMOP_REF(%3) "\n"
-                       : "+r" (old_val), MEMOP_OUT (__hp(4, addr))
-                       : "r" (_new), MEMOP_IN (__hp(4, addr))
-                       : "memory", "cc");
-               return old_val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-                       "       csg %0,%2," MEMOP_REF(%3) "\n"
-                       : "+r" (old), MEMOP_OUT (__hp(8, addr))
-                       : "r" (_new), MEMOP_IN (__hp(8, addr))
-                       : "memory", "cc");
-               return old;
-       }
-#endif
-       default:
-               __asm__ __volatile__(".long     0xd00d00");
-       }
-
-       return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new)                                    \
-       (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
-                                              caa_cast_long_keep_sign(old), \
-                                              caa_cast_long_keep_sign(_new),\
-                                              sizeof(*(addr)))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_S390_H */
diff --git a/include/urcu/uatomic/sparc64.h b/include/urcu/uatomic/sparc64.h
deleted file mode 100644
index a9f2795..0000000
--- a/include/urcu/uatomic/sparc64.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
-#define _URCU_ARCH_UATOMIC_SPARC64_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 4:
-       {
-               __asm__ __volatile__ (
-                       "membar #StoreLoad | #LoadLoad\n\t"
-                        "cas [%1],%2,%0\n\t"
-                        "membar #StoreLoad | #StoreStore\n\t"
-                        : "+&r" (_new)
-                        : "r" (addr), "r" (old)
-                        : "memory");
-
-               return _new;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__ (
-                       "membar #StoreLoad | #LoadLoad\n\t"
-                        "casx [%1],%2,%0\n\t"
-                        "membar #StoreLoad | #StoreStore\n\t"
-                        : "+&r" (_new)
-                        : "r" (addr), "r" (old)
-                        : "memory");
-
-               return _new;
-       }
-#endif
-       }
-       __builtin_trap();
-       return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new)                                      \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                        \
-                                               caa_cast_long_keep_sign(old),  \
-                                               caa_cast_long_keep_sign(_new), \
-                                               sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
diff --git a/include/urcu/uatomic/tile.h b/include/urcu/uatomic/tile.h
deleted file mode 100644
index 830f260..0000000
--- a/include/urcu/uatomic/tile.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_TILE_H
-#define _URCU_ARCH_UATOMIC_TILE_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009-2015 Mathieu Desnoyers
- * Copyright (c) 2010      Paul E. McKenney, IBM Corporation
- *                        (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_TILE_H */
diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h
deleted file mode 100644
index d416963..0000000
--- a/include/urcu/uatomic/x86.h
+++ /dev/null
@@ -1,646 +0,0 @@
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009      Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose,  provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/arch.h>
-#include <urcu/config.h>
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
-/*
- * The __hp() macro casts the void pointer @x to a pointer to a structure
- * containing an array of char of the specified size. This allows passing the
- * @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly. The @size parameter should be a constant to support
- * compilers such as clang which do not support VLA. Create typedefs because
- * C++ does not allow types be defined in casts.
- */
-
-typedef struct { char v[1]; } __hp_1;
-typedef struct { char v[2]; } __hp_2;
-typedef struct { char v[4]; } __hp_4;
-typedef struct { char v[8]; } __hp_8;
-
-#define __hp(size, x)  ((__hp_##size *)(x))
-
-#define _uatomic_set(addr, v)  ((void) CMM_STORE_SHARED(*(addr), (v)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
-                             unsigned long _new, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               unsigned char result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgb %2, %1"
-                       : "+a"(result), "+m"(*__hp(1, addr))
-                       : "q"((unsigned char)_new)
-                       : "memory");
-               return result;
-       }
-       case 2:
-       {
-               unsigned short result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgw %2, %1"
-                       : "+a"(result), "+m"(*__hp(2, addr))
-                       : "r"((unsigned short)_new)
-                       : "memory");
-               return result;
-       }
-       case 4:
-       {
-               unsigned int result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgl %2, %1"
-                       : "+a"(result), "+m"(*__hp(4, addr))
-                       : "r"((unsigned int)_new)
-                       : "memory");
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result = old;
-
-               __asm__ __volatile__(
-               "lock; cmpxchgq %2, %1"
-                       : "+a"(result), "+m"(*__hp(8, addr))
-                       : "r"((unsigned long)_new)
-                       : "memory");
-               return result;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_cmpxchg(addr, old, _new)                                    \
-       ((__typeof__(*(addr))) __uatomic_cmpxchg((addr),                      \
-                                               caa_cast_long_keep_sign(old), \
-                                               caa_cast_long_keep_sign(_new),\
-                                               sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
-{
-       /* Note: the "xchg" instruction does not need a "lock" prefix. */
-       switch (len) {
-       case 1:
-       {
-               unsigned char result;
-               __asm__ __volatile__(
-               "xchgb %0, %1"
-                       : "=q"(result), "+m"(*__hp(1, addr))
-                       : "0" ((unsigned char)val)
-                       : "memory");
-               return result;
-       }
-       case 2:
-       {
-               unsigned short result;
-               __asm__ __volatile__(
-               "xchgw %0, %1"
-                       : "=r"(result), "+m"(*__hp(2, addr))
-                       : "0" ((unsigned short)val)
-                       : "memory");
-               return result;
-       }
-       case 4:
-       {
-               unsigned int result;
-               __asm__ __volatile__(
-               "xchgl %0, %1"
-                       : "=r"(result), "+m"(*__hp(4, addr))
-                       : "0" ((unsigned int)val)
-                       : "memory");
-               return result;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result;
-               __asm__ __volatile__(
-               "xchgq %0, %1"
-                       : "=r"(result), "+m"(*__hp(8, addr))
-                       : "0" ((unsigned long)val)
-                       : "memory");
-               return result;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_xchg(addr, v)                                               \
-       ((__typeof__(*(addr))) __uatomic_exchange((addr),                     \
-                                               caa_cast_long_keep_sign(v),   \
-                                               sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_add_return(void *addr, unsigned long val,
-                                int len)
-{
-       switch (len) {
-       case 1:
-       {
-               unsigned char result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddb %1, %0"
-                       : "+m"(*__hp(1, addr)), "+q" (result)
-                       :
-                       : "memory");
-               return result + (unsigned char)val;
-       }
-       case 2:
-       {
-               unsigned short result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddw %1, %0"
-                       : "+m"(*__hp(2, addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned short)val;
-       }
-       case 4:
-       {
-               unsigned int result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddl %1, %0"
-                       : "+m"(*__hp(4, addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned int)val;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               unsigned long result = val;
-
-               __asm__ __volatile__(
-               "lock; xaddq %1, %0"
-                       : "+m"(*__hp(8, addr)), "+r" (result)
-                       :
-                       : "memory");
-               return result + (unsigned long)val;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return 0;
-}
-
-#define _uatomic_add_return(addr, v)                                       \
-       ((__typeof__(*(addr))) __uatomic_add_return((addr),                 \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void __uatomic_and(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; andb %1, %0"
-                       : "=m"(*__hp(1, addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; andw %1, %0"
-                       : "=m"(*__hp(2, addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; andl %1, %0"
-                       : "=m"(*__hp(4, addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; andq %1, %0"
-                       : "=m"(*__hp(8, addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_and(addr, v)                                             \
-       (__uatomic_and((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
-
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void __uatomic_or(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; orb %1, %0"
-                       : "=m"(*__hp(1, addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; orw %1, %0"
-                       : "=m"(*__hp(2, addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; orl %1, %0"
-                       : "=m"(*__hp(4, addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; orq %1, %0"
-                       : "=m"(*__hp(8, addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_or(addr, v)                                              \
-       (__uatomic_or((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
-
-/* uatomic_add */
-
-static inline __attribute__((always_inline))
-void __uatomic_add(void *addr, unsigned long val, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; addb %1, %0"
-                       : "=m"(*__hp(1, addr))
-                       : "iq" ((unsigned char)val)
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; addw %1, %0"
-                       : "=m"(*__hp(2, addr))
-                       : "ir" ((unsigned short)val)
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; addl %1, %0"
-                       : "=m"(*__hp(4, addr))
-                       : "ir" ((unsigned int)val)
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; addq %1, %0"
-                       : "=m"(*__hp(8, addr))
-                       : "er" ((unsigned long)val)
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_add(addr, v)                                             \
-       (__uatomic_add((addr), caa_cast_long_keep_sign(v), sizeof(*(addr))))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void __uatomic_inc(void *addr, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; incb %0"
-                       : "=m"(*__hp(1, addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; incw %0"
-                       : "=m"(*__hp(2, addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; incl %0"
-                       : "=m"(*__hp(4, addr))
-                       :
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; incq %0"
-                       : "=m"(*__hp(8, addr))
-                       :
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_inc(addr)     (__uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void __uatomic_dec(void *addr, int len)
-{
-       switch (len) {
-       case 1:
-       {
-               __asm__ __volatile__(
-               "lock; decb %0"
-                       : "=m"(*__hp(1, addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 2:
-       {
-               __asm__ __volatile__(
-               "lock; decw %0"
-                       : "=m"(*__hp(2, addr))
-                       :
-                       : "memory");
-               return;
-       }
-       case 4:
-       {
-               __asm__ __volatile__(
-               "lock; decl %0"
-                       : "=m"(*__hp(4, addr))
-                       :
-                       : "memory");
-               return;
-       }
-#if (CAA_BITS_PER_LONG == 64)
-       case 8:
-       {
-               __asm__ __volatile__(
-               "lock; decq %0"
-                       : "=m"(*__hp(8, addr))
-                       :
-                       : "memory");
-               return;
-       }
-#endif
-       }
-       /*
-        * generate an illegal instruction. Cannot catch this with
-        * linker tricks when optimizations are disabled.
-        */
-       __asm__ __volatile__("ud2");
-       return;
-}
-
-#define _uatomic_dec(addr)     (__uatomic_dec((addr), sizeof(*(addr))))
-
-#ifdef URCU_ARCH_X86_NO_CAS
-
-/* For backwards compat */
-#define CONFIG_RCU_COMPAT_ARCH 1
-
-extern int __rcu_cas_avail;
-extern int __rcu_cas_init(void);
-
-#define UATOMIC_COMPAT(insn)                                                  \
-       ((caa_likely(__rcu_cas_avail > 0))                                     \
-       ? (_uatomic_##insn)                                                    \
-               : ((caa_unlikely(__rcu_cas_avail < 0)                          \
-                       ? ((__rcu_cas_init() > 0)                              \
-                               ? (_uatomic_##insn)                            \
-                               : (compat_uatomic_##insn))                     \
-                       : (compat_uatomic_##insn))))
-
-/*
- * We leave the return value so we don't break the ABI, but remove the
- * return value from the API.
- */
-extern unsigned long _compat_uatomic_set(void *addr,
-                                        unsigned long _new, int len);
-#define compat_uatomic_set(addr, _new)                                        \
-       ((void) _compat_uatomic_set((addr),                                    \
-                               caa_cast_long_keep_sign(_new),                 \
-                               sizeof(*(addr))))
-
-
-extern unsigned long _compat_uatomic_xchg(void *addr,
-                                         unsigned long _new, int len);
-#define compat_uatomic_xchg(addr, _new)                                       \
-       ((__typeof__(*(addr))) _compat_uatomic_xchg((addr),                    \
-                                               caa_cast_long_keep_sign(_new), \
-                                               sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
-                                            unsigned long _new, int len);
-#define compat_uatomic_cmpxchg(addr, old, _new)                               \
-       ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),                 \
-                                               caa_cast_long_keep_sign(old),  \
-                                               caa_cast_long_keep_sign(_new), \
-                                               sizeof(*(addr))))
-
-extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
-#define compat_uatomic_and(addr, v)                                   \
-       (_compat_uatomic_and((addr),                                   \
-                       caa_cast_long_keep_sign(v),                    \
-                       sizeof(*(addr))))
-
-extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
-#define compat_uatomic_or(addr, v)                                    \
-       (_compat_uatomic_or((addr),                                    \
-                         caa_cast_long_keep_sign(v),                  \
-                         sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_add_return(void *addr,
-                                               unsigned long _new, int len);
-#define compat_uatomic_add_return(addr, v)                                 \
-       ((__typeof__(*(addr))) _compat_uatomic_add_return((addr),           \
-                                               caa_cast_long_keep_sign(v), \
-                                               sizeof(*(addr))))
-
-#define compat_uatomic_add(addr, v)                                           \
-               ((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_inc(addr)                                              \
-               (compat_uatomic_add((addr), 1))
-#define compat_uatomic_dec(addr)                                              \
-               (compat_uatomic_add((addr), -1))
-
-#else
-#define UATOMIC_COMPAT(insn)   (_uatomic_##insn)
-#endif
-
-/* Read is atomic even in compat mode */
-#define uatomic_set(addr, v)                   \
-               UATOMIC_COMPAT(set(addr, v))
-
-#define uatomic_cmpxchg(addr, old, _new)       \
-               UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v)                  \
-               UATOMIC_COMPAT(xchg(addr, v))
-
-#define uatomic_and(addr, v)           \
-               UATOMIC_COMPAT(and(addr, v))
-#define cmm_smp_mb__before_uatomic_and()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_and()                cmm_barrier()
-
-#define uatomic_or(addr, v)            \
-               UATOMIC_COMPAT(or(addr, v))
-#define cmm_smp_mb__before_uatomic_or()                cmm_barrier()
-#define cmm_smp_mb__after_uatomic_or()         cmm_barrier()
-
-#define uatomic_add_return(addr, v)            \
-               UATOMIC_COMPAT(add_return(addr, v))
-
-#define uatomic_add(addr, v)   UATOMIC_COMPAT(add(addr, v))
-#define cmm_smp_mb__before_uatomic_add()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_add()                cmm_barrier()
-
-#define uatomic_inc(addr)      UATOMIC_COMPAT(inc(addr))
-#define cmm_smp_mb__before_uatomic_inc()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_inc()                cmm_barrier()
-
-#define uatomic_dec(addr)      UATOMIC_COMPAT(dec(addr))
-#define cmm_smp_mb__before_uatomic_dec()       cmm_barrier()
-#define cmm_smp_mb__after_uatomic_dec()                cmm_barrier()
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic/generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
-- 
2.39.2
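
For readers less familiar with the C11 builtins, here is a minimal
sketch of how the removed size-dispatched primitives (cmpxchg, xchg,
add_return, and) map onto their __atomic equivalents. This is a
hypothetical illustration, not the actual replacement code in
urcu/uatomic.h; __ATOMIC_SEQ_CST is an assumption, chosen to match the
full-barrier semantics of the removed "lock"-prefixed instructions:

    /*
     * Hypothetical illustration only -- not the code added by this
     * patch. The builtins are type-generic, so one definition covers
     * every operand size that the removed switch (len) dispatched on.
     */
    #include <stdint.h>

    static inline uint32_t
    example_cmpxchg_u32(uint32_t *addr, uint32_t old, uint32_t new_val)
    {
            /*
             * On failure the builtin stores the observed value into
             * 'old', so returning 'old' matches the removed convention
             * of always returning the value seen at 'addr'.
             */
            __atomic_compare_exchange_n(addr, &old, new_val, 0,
                                        __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST);
            return old;
    }

    static inline uint32_t
    example_xchg_u32(uint32_t *addr, uint32_t val)
    {
            return __atomic_exchange_n(addr, val, __ATOMIC_SEQ_CST);
    }

    static inline uint32_t
    example_add_return_u32(uint32_t *addr, uint32_t val)
    {
            /* add_fetch returns the new value, like xadd + add. */
            return __atomic_add_fetch(addr, val, __ATOMIC_SEQ_CST);
    }

    static inline void
    example_and_u32(uint32_t *addr, uint32_t mask)
    {
            (void) __atomic_and_fetch(addr, mask, __ATOMIC_SEQ_CST);
    }

Because the builtins are type-generic, the per-size switch statements,
the __hp() casting trick, and the "ud2"/trap fallbacks for unsupported
sizes all become unnecessary.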
