Author: andrew
Date: Thu Jul 16 13:33:03 2015
New Revision: 285631
URL: https://svnweb.freebsd.org/changeset/base/285631

Log:
  Split out the ARMv4/v5 and ARMv6 parts of atomic.h into new files. While
  here, use __ARM_ARCH to determine which revision of the architecture is
  applicable.
  
  Sponsored by: ABT Systems Ltd

Added:
  head/sys/arm/include/atomic-v4.h
     - copied, changed from r285531, head/sys/arm/include/atomic.h
  head/sys/arm/include/atomic-v6.h
     - copied, changed from r285531, head/sys/arm/include/atomic.h
Modified:
  head/sys/arm/include/atomic.h
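
The diff for the modified head/sys/arm/include/atomic.h is not included below
(the output is truncated), but given the log message and the #error guards added
to the two new headers, it presumably becomes a thin wrapper that selects the
per-architecture header via __ARM_ARCH. A minimal sketch of such a dispatch
header follows; the exact contents of the committed file are an assumption and
may differ:

  /* Hypothetical sketch of the dispatching machine/atomic.h, not the committed file. */
  #ifndef _MACHINE_ATOMIC_H_
  #define _MACHINE_ATOMIC_H_

  #include <sys/types.h>

  #ifndef _KERNEL
  #include <machine/sysarch.h>
  #else
  #include <machine/cpuconf.h>
  #endif

  /* Pick the implementation for the architecture revision being compiled for. */
  #if __ARM_ARCH >= 6
  #include <machine/atomic-v6.h>
  #else
  #include <machine/atomic-v4.h>
  #endif

  /* Width-neutral aliases (atomic_*_int, atomic_*_ptr) and the thread fences
   * would remain here, shared by both implementations. */

  #endif /* _MACHINE_ATOMIC_H_ */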

Copied and modified: head/sys/arm/include/atomic-v4.h (from r285531, head/sys/arm/include/atomic.h)
==============================================================================
--- head/sys/arm/include/atomic.h       Tue Jul 14 10:49:36 2015        (r285531, copy source)
+++ head/sys/arm/include/atomic-v4.h    Thu Jul 16 13:33:03 2015        (r285631)
@@ -36,659 +36,25 @@
  * $FreeBSD$
  */
 
-#ifndef        _MACHINE_ATOMIC_H_
-#define        _MACHINE_ATOMIC_H_
+#ifndef _MACHINE_ATOMIC_V4_H_
+#define        _MACHINE_ATOMIC_V4_H_
 
-#include <sys/types.h>
-#include <machine/armreg.h>
-
-#ifndef _KERNEL
-#include <machine/sysarch.h>
-#else
-#include <machine/cpuconf.h>
+#ifndef _MACHINE_ATOMIC_H_
+#error Do not include this file directly, use <machine/atomic.h>
 #endif
 
-#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
-#define isb()  __asm __volatile("isb" : : : "memory")
-#define dsb()  __asm __volatile("dsb" : : : "memory")
-#define dmb()  __asm __volatile("dmb" : : : "memory")
-#elif defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) || \
-  defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6T2__) || \
-  defined (__ARM_ARCH_6Z__) || defined (__ARM_ARCH_6ZK__)
-#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
-#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
-#define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
-#else
+#if __ARM_ARCH <= 5
 #define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
 #define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
 #define dmb()  dsb()
+#else
+#error Only use this file with ARMv5 and earlier
 #endif
 
 #define mb()   dmb()
 #define wmb()  dmb()
 #define rmb()  dmb()
 
-
-
-/*
- * It would be nice to use _HAVE_ARMv6_INSTRUCTIONS from machine/asm.h
- * here, but that header can't be included here because this is C
- * code.  I would like to move the _HAVE_ARMv6_INSTRUCTIONS definition
- * out of asm.h so it can be used in both asm and C code. - kientzle@
- */
-#if defined (__ARM_ARCH_7__) || \
-       defined (__ARM_ARCH_7A__)  || \
-       defined (__ARM_ARCH_6__)   || \
-       defined (__ARM_ARCH_6J__)  || \
-       defined (__ARM_ARCH_6K__)  || \
-       defined (__ARM_ARCH_6T2__) || \
-       defined (__ARM_ARCH_6Z__)  || \
-       defined (__ARM_ARCH_6ZK__)
-#define        ARM_HAVE_ATOMIC64
-
-static __inline void
-__do_dmb(void)
-{
-
-#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
-       __asm __volatile("dmb" : : : "memory");
-#else
-       __asm __volatile("mcr p15, 0, r0, c7, c10, 5" : : : "memory");
-#endif
-}
-
-#define ATOMIC_ACQ_REL_LONG(NAME)                                      \
-static __inline void                                                   \
-atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)               \
-{                                                                      \
-       atomic_##NAME##_long(p, v);                                     \
-       __do_dmb();                                                     \
-}                                                                      \
-                                                                       \
-static __inline  void                                                  \
-atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)               \
-{                                                                      \
-       __do_dmb();                                                     \
-       atomic_##NAME##_long(p, v);                                     \
-}
-
-#define        ATOMIC_ACQ_REL(NAME, WIDTH)                                     \
-static __inline  void                                                  \
-atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
-{                                                                      \
-       atomic_##NAME##_##WIDTH(p, v);                                  \
-       __do_dmb();                                                     \
-}                                                                      \
-                                                                       \
-static __inline  void                                                  \
-atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
-{                                                                      \
-       __do_dmb();                                                     \
-       atomic_##NAME##_##WIDTH(p, v);                                  \
-}
-
-static __inline void
-atomic_set_32(volatile uint32_t *address, uint32_t setmask)
-{
-       uint32_t tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "orr %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "=&r" (tmp), "+r" (tmp2)
-                          , "+r" (address), "+r" (setmask) : : "cc", "memory");
-                            
-}
-
-static __inline void
-atomic_set_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   orr      %Q[tmp], %Q[val]\n"
-               "   orr      %R[tmp], %R[val]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [exf]    "=&r"  (exflag), 
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-}
-
-static __inline void
-atomic_set_long(volatile u_long *address, u_long setmask)
-{
-       u_long tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "orr %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "=&r" (tmp), "+r" (tmp2)
-                          , "+r" (address), "+r" (setmask) : : "cc", "memory");
-                            
-}
-
-static __inline void
-atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
-{
-       uint32_t tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "bic %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "=&r" (tmp), "+r" (tmp2)
-                          ,"+r" (address), "+r" (setmask) : : "cc", "memory");
-}
-
-static __inline void
-atomic_clear_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   bic      %Q[tmp], %Q[val]\n"
-               "   bic      %R[tmp], %R[val]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [exf]    "=&r"  (exflag), 
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-}
-
-static __inline void
-atomic_clear_long(volatile u_long *address, u_long setmask)
-{
-       u_long tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "bic %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "=&r" (tmp), "+r" (tmp2)
-                          ,"+r" (address), "+r" (setmask) : : "cc", "memory");
-}
-
-static __inline u_int32_t
-atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
-{
-       uint32_t ret;
-       
-       __asm __volatile("1: ldrex %0, [%1]\n"
-                        "cmp %0, %2\n"
-                        "itt ne\n"
-                        "movne %0, #0\n"
-                        "bne 2f\n"
-                        "strex %0, %3, [%1]\n"
-                        "cmp %0, #0\n"
-                        "ite eq\n"
-                        "moveq %0, #1\n"
-                        "bne   1b\n"
-                        "2:"
-                        : "=&r" (ret)
-                        ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "cc",
-                        "memory");
-       return (ret);
-}
-
-static __inline int
-atomic_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
-       uint64_t tmp;
-       uint32_t ret;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %Q[tmp], %Q[cmpval]\n"
-               "   itee eq  \n"
-               "   teqeq    %R[tmp], %R[cmpval]\n"
-               "   movne    %[ret], #0\n"
-               "   bne      2f\n"
-               "   strexd   %[ret], %Q[newval], %R[newval], [%[ptr]]\n"
-               "   teq      %[ret], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               "   mov      %[ret], #1\n"
-               "2:          \n"
-               :   [ret]    "=&r"  (ret), 
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [cmpval] "r"    (cmpval), 
-                   [newval] "r"    (newval)
-               :   "cc", "memory");
-       return (ret);
-}
-
-static __inline u_long
-atomic_cmpset_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
-{
-       u_long ret;
-       
-       __asm __volatile("1: ldrex %0, [%1]\n"
-                        "cmp %0, %2\n"
-                        "itt ne\n"
-                        "movne %0, #0\n"
-                        "bne 2f\n"
-                        "strex %0, %3, [%1]\n"
-                        "cmp %0, #0\n"
-                        "ite eq\n"
-                        "moveq %0, #1\n"
-                        "bne   1b\n"
-                        "2:"
-                        : "=&r" (ret)
-                        ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "cc",
-                        "memory");
-       return (ret);
-}
-
-static __inline u_int32_t
-atomic_cmpset_acq_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
-{
-       u_int32_t ret = atomic_cmpset_32(p, cmpval, newval);
-
-       __do_dmb();
-       return (ret);
-}
-
-static __inline uint64_t
-atomic_cmpset_acq_64(volatile uint64_t *p, volatile uint64_t cmpval, volatile uint64_t newval)
-{
-       uint64_t ret = atomic_cmpset_64(p, cmpval, newval);
-
-       __do_dmb();
-       return (ret);
-}
-
-static __inline u_long
-atomic_cmpset_acq_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
-{
-       u_long ret = atomic_cmpset_long(p, cmpval, newval);
-
-       __do_dmb();
-       return (ret);
-}
-
-static __inline u_int32_t
-atomic_cmpset_rel_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
-{
-       
-       __do_dmb();
-       return (atomic_cmpset_32(p, cmpval, newval));
-}
-
-static __inline uint64_t
-atomic_cmpset_rel_64(volatile uint64_t *p, volatile uint64_t cmpval, volatile uint64_t newval)
-{
-       
-       __do_dmb();
-       return (atomic_cmpset_64(p, cmpval, newval));
-}
-
-static __inline u_long
-atomic_cmpset_rel_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
-{
-       
-       __do_dmb();
-       return (atomic_cmpset_long(p, cmpval, newval));
-}
-
-
-static __inline void
-atomic_add_32(volatile u_int32_t *p, u_int32_t val)
-{
-       uint32_t tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "add %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                           : "=&r" (tmp), "+r" (tmp2)
-                           ,"+r" (p), "+r" (val) : : "cc", "memory");
-}
-
-static __inline void
-atomic_add_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   adds     %Q[tmp], %Q[val]\n"
-               "   adc      %R[tmp], %R[tmp], %R[val]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [exf]    "=&r"  (exflag), 
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-}
-
-static __inline void
-atomic_add_long(volatile u_long *p, u_long val)
-{
-       u_long tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "add %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                           : "=&r" (tmp), "+r" (tmp2)
-                           ,"+r" (p), "+r" (val) : : "cc", "memory");
-}
-
-static __inline void
-atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
-{
-       uint32_t tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "sub %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                           : "=&r" (tmp), "+r" (tmp2)
-                           ,"+r" (p), "+r" (val) : : "cc", "memory");
-}
-
-static __inline void
-atomic_subtract_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   subs     %Q[tmp], %Q[val]\n"
-               "   sbc      %R[tmp], %R[tmp], %R[val]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [exf]    "=&r"  (exflag), 
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-}
-
-static __inline void
-atomic_subtract_long(volatile u_long *p, u_long val)
-{
-       u_long tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%2]\n"
-                           "sub %0, %0, %3\n"
-                           "strex %1, %0, [%2]\n"
-                           "cmp %1, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                           : "=&r" (tmp), "+r" (tmp2)
-                           ,"+r" (p), "+r" (val) : : "cc", "memory");
-}
-
-ATOMIC_ACQ_REL(clear, 32)
-ATOMIC_ACQ_REL(add, 32)
-ATOMIC_ACQ_REL(subtract, 32)
-ATOMIC_ACQ_REL(set, 32)
-ATOMIC_ACQ_REL(clear, 64)
-ATOMIC_ACQ_REL(add, 64)
-ATOMIC_ACQ_REL(subtract, 64)
-ATOMIC_ACQ_REL(set, 64)
-ATOMIC_ACQ_REL_LONG(clear)
-ATOMIC_ACQ_REL_LONG(add)
-ATOMIC_ACQ_REL_LONG(subtract)
-ATOMIC_ACQ_REL_LONG(set)
-
-#undef ATOMIC_ACQ_REL
-#undef ATOMIC_ACQ_REL_LONG
-
-static __inline uint32_t
-atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
-{
-       uint32_t tmp = 0, tmp2 = 0, ret = 0;
-
-       __asm __volatile("1: ldrex %0, [%3]\n"
-                           "add %1, %0, %4\n"
-                           "strex %2, %1, [%3]\n"
-                           "cmp %2, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "+r" (ret), "=&r" (tmp), "+r" (tmp2)
-                          ,"+r" (p), "+r" (val) : : "cc", "memory");
-       return (ret);
-}
-
-static __inline uint32_t
-atomic_readandclear_32(volatile u_int32_t *p)
-{
-       uint32_t ret, tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%3]\n"
-                        "mov %1, #0\n"
-                        "strex %2, %1, [%3]\n"
-                        "cmp %2, #0\n"
-                        "it ne\n"
-                        "bne 1b\n"
-                        : "=r" (ret), "=&r" (tmp), "+r" (tmp2)
-                        ,"+r" (p) : : "cc", "memory");
-       return (ret);
-}
-
-static __inline uint32_t
-atomic_load_acq_32(volatile uint32_t *p)
-{
-       uint32_t v;
-
-       v = *p;
-       __do_dmb();
-       return (v);
-}
-
-static __inline void
-atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
-{
-       
-       __do_dmb();
-       *p = v;
-}
-
-static __inline uint64_t
-atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t ret, tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   adds     %Q[tmp], %Q[ret], %Q[val]\n"
-               "   adc      %R[tmp], %R[ret], %R[val]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [ret]    "=&r"  (ret),
-                   [exf]    "=&r"  (exflag),
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p), 
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-       return (ret);
-}
-
-static __inline uint64_t
-atomic_readandclear_64(volatile uint64_t *p)
-{
-       uint64_t ret, tmp;
-       uint32_t exflag;
-
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[ret], %R[ret], [%[ptr]]\n"
-               "   mov      %Q[tmp], #0\n"
-               "   mov      %R[tmp], #0\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [ret]    "=&r"  (ret),
-                   [exf]    "=&r"  (exflag),
-                   [tmp]    "=&r"  (tmp)
-               :   [ptr]    "r"    (p)
-               :   "cc", "memory");
-       return (ret);
-}
-
-static __inline uint64_t
-atomic_load_64(volatile uint64_t *p)
-{
-       uint64_t ret;
-
-       /*
-        * The only way to atomically load 64 bits is with LDREXD which puts the
-        * exclusive monitor into the exclusive state, so reset it to open state
-        * with CLREX because we don't actually need to store anything.
-        */
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[ret], %R[ret], [%[ptr]]\n"
-               "   clrex    \n"
-               :   [ret]    "=&r"  (ret)
-               :   [ptr]    "r"    (p)
-               :   "cc", "memory");
-       return (ret);
-}
-
-static __inline uint64_t
-atomic_load_acq_64(volatile uint64_t *p)
-{
-       uint64_t ret;
-
-       ret = atomic_load_64(p);
-       __do_dmb();
-       return (ret);
-}
-
-static __inline void
-atomic_store_64(volatile uint64_t *p, uint64_t val)
-{
-       uint64_t tmp;
-       uint32_t exflag;
-
-       /*
-        * The only way to atomically store 64 bits is with STREXD, which will
-        * succeed only if paired up with a preceeding LDREXD using the same
-        * address, so we read and discard the existing value before storing.
-        */
-       __asm __volatile(
-               "1:          \n"
-               "   ldrexd   %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   strexd   %[exf], %Q[tmp], %R[tmp], [%[ptr]]\n"
-               "   teq      %[exf], #0\n"
-               "   it ne    \n"
-               "   bne      1b\n"
-               :   [tmp]    "=&r"  (tmp),
-                   [exf]    "=&r"  (exflag)
-               :   [ptr]    "r"    (p),
-                   [val]    "r"    (val)
-               :   "cc", "memory");
-}
-
-static __inline void
-atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
-{
-
-       __do_dmb();
-       atomic_store_64(p, val);
-}
-
-static __inline u_long
-atomic_fetchadd_long(volatile u_long *p, u_long val)
-{
-       u_long tmp = 0, tmp2 = 0, ret = 0;
-
-       __asm __volatile("1: ldrex %0, [%3]\n"
-                           "add %1, %0, %4\n"
-                           "strex %2, %1, [%3]\n"
-                           "cmp %2, #0\n"
-                           "it ne\n"
-                           "bne        1b\n"
-                          : "+r" (ret), "=&r" (tmp), "+r" (tmp2)
-                          ,"+r" (p), "+r" (val) : : "cc", "memory");
-       return (ret);
-}
-
-static __inline u_long
-atomic_readandclear_long(volatile u_long *p)
-{
-       u_long ret, tmp = 0, tmp2 = 0;
-
-       __asm __volatile("1: ldrex %0, [%3]\n"
-                        "mov %1, #0\n"
-                        "strex %2, %1, [%3]\n"
-                        "cmp %2, #0\n"
-                        "it ne\n"
-                        "bne 1b\n"
-                        : "=r" (ret), "=&r" (tmp), "+r" (tmp2)
-                        ,"+r" (p) : : "cc", "memory");
-       return (ret);
-}
-
-static __inline u_long
-atomic_load_acq_long(volatile u_long *p)
-{
-       u_long v;
-
-       v = *p;
-       __do_dmb();
-       return (v);
-}
-
-static __inline void
-atomic_store_rel_long(volatile u_long *p, u_long v)
-{
-       
-       __do_dmb();
-       *p = v;
-}
-#else /* < armv6 */
-
 #define __with_interrupts_disabled(expr) \
        do {                                            \
                u_int cpsr_save, tmp;                   \
@@ -1073,90 +439,4 @@ atomic_subtract_long(volatile u_long *p,
        atomic_subtract_32((volatile uint32_t *)p, v);
 }
 
-
-
-#endif /* Arch >= v6 */
-
-static __inline int
-atomic_load_32(volatile uint32_t *v)
-{
-
-       return (*v);
-}
-
-static __inline void
-atomic_store_32(volatile uint32_t *dst, uint32_t src)
-{
-       *dst = src;
-}
-
-static __inline int
-atomic_load_long(volatile u_long *v)
-{
-
-       return (*v);
-}
-
-static __inline void
-atomic_store_long(volatile u_long *dst, u_long src)
-{
-       *dst = src;
-}
-
-static __inline void
-atomic_thread_fence_acq(void)
-{
-
-       dmb();
-}
-
-static __inline void
-atomic_thread_fence_rel(void)
-{
-
-       dmb();
-}
-
-static __inline void
-atomic_thread_fence_acq_rel(void)
-{
-
-       dmb();
-}
-
-static __inline void
-atomic_thread_fence_seq_cst(void)
-{
-
-       dmb();
-}
-
-#define atomic_clear_ptr               atomic_clear_32
-#define atomic_set_ptr                 atomic_set_32
-#define atomic_cmpset_ptr              atomic_cmpset_32
-#define atomic_cmpset_rel_ptr          atomic_cmpset_rel_32
-#define atomic_cmpset_acq_ptr          atomic_cmpset_acq_32
-#define atomic_store_ptr               atomic_store_32
-#define atomic_store_rel_ptr           atomic_store_rel_32
-
-#define atomic_add_int                 atomic_add_32
-#define atomic_add_acq_int             atomic_add_acq_32
-#define atomic_add_rel_int             atomic_add_rel_32
-#define atomic_subtract_int            atomic_subtract_32
-#define atomic_subtract_acq_int                atomic_subtract_acq_32
-#define atomic_subtract_rel_int                atomic_subtract_rel_32
-#define atomic_clear_int               atomic_clear_32
-#define atomic_clear_acq_int           atomic_clear_acq_32
-#define atomic_clear_rel_int           atomic_clear_rel_32
-#define atomic_set_int                 atomic_set_32
-#define atomic_set_acq_int             atomic_set_acq_32
-#define atomic_set_rel_int             atomic_set_rel_32
-#define atomic_cmpset_int              atomic_cmpset_32
-#define atomic_cmpset_acq_int          atomic_cmpset_acq_32
-#define atomic_cmpset_rel_int          atomic_cmpset_rel_32
-#define atomic_fetchadd_int            atomic_fetchadd_32
-#define atomic_readandclear_int                atomic_readandclear_32
-#define atomic_load_acq_int            atomic_load_acq_32
-#define atomic_store_rel_int           atomic_store_rel_32
-
 #endif /* _MACHINE_ATOMIC_H_ */

Copied and modified: head/sys/arm/include/atomic-v6.h (from r285531, head/sys/arm/include/atomic.h)
==============================================================================
--- head/sys/arm/include/atomic.h       Tue Jul 14 10:49:36 2015        (r285531, copy source)
+++ head/sys/arm/include/atomic-v6.h    Thu Jul 16 13:33:03 2015        (r285631)
@@ -36,61 +36,36 @@
  * $FreeBSD$
  */
 
-#ifndef        _MACHINE_ATOMIC_H_
-#define        _MACHINE_ATOMIC_H_
+#ifndef _MACHINE_ATOMIC_V6_H_
+#define        _MACHINE_ATOMIC_V6_H_
 
-#include <sys/types.h>
-#include <machine/armreg.h>
-
-#ifndef _KERNEL
-#include <machine/sysarch.h>
-#else
-#include <machine/cpuconf.h>
+#ifndef _MACHINE_ATOMIC_H_
+#error Do not include this file directly, use <machine/atomic.h>
 #endif
 
-#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
+#if __ARM_ARCH >= 7
 #define isb()  __asm __volatile("isb" : : : "memory")
 #define dsb()  __asm __volatile("dsb" : : : "memory")
 #define dmb()  __asm __volatile("dmb" : : : "memory")
-#elif defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) || \
-  defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6T2__) || \
-  defined (__ARM_ARCH_6Z__) || defined (__ARM_ARCH_6ZK__)
+#elif __ARM_ARCH >= 6
 #define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
 #define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
 #define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
 #else
-#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
-#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
-#define dmb()  dsb()
+#error Only use this file with ARMv6 and later
 #endif
 
 #define mb()   dmb()
 #define wmb()  dmb()
 #define rmb()  dmb()
 
-
-
-/*
- * It would be nice to use _HAVE_ARMv6_INSTRUCTIONS from machine/asm.h
- * here, but that header can't be included here because this is C
- * code.  I would like to move the _HAVE_ARMv6_INSTRUCTIONS definition
- * out of asm.h so it can be used in both asm and C code. - kientzle@
- */
-#if defined (__ARM_ARCH_7__) || \
-       defined (__ARM_ARCH_7A__)  || \
-       defined (__ARM_ARCH_6__)   || \
-       defined (__ARM_ARCH_6J__)  || \
-       defined (__ARM_ARCH_6K__)  || \
-       defined (__ARM_ARCH_6T2__) || \
-       defined (__ARM_ARCH_6Z__)  || \
-       defined (__ARM_ARCH_6ZK__)
 #define        ARM_HAVE_ATOMIC64
 
 static __inline void
 __do_dmb(void)
 {
 
-#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
+#if __ARM_ARCH >= 7
        __asm __volatile("dmb" : : : "memory");
 #else
        __asm __volatile("mcr p15, 0, r0, c7, c10, 5" : : : "memory");
@@ -687,476 +662,5 @@ atomic_store_rel_long(volatile u_long *p
        __do_dmb();
        *p = v;
 }
-#else /* < armv6 */
-
-#define __with_interrupts_disabled(expr) \
-       do {                                            \
-               u_int cpsr_save, tmp;                   \
-                                                       \
-               __asm __volatile(                       \
-                       "mrs  %0, cpsr;"                \
-                       "orr  %1, %0, %2;"              \
-                       "msr  cpsr_fsxc, %1;"           \
-                       : "=r" (cpsr_save), "=r" (tmp)  \
-                       : "I" (PSR_I | PSR_F)           \
-                       : "cc" );               \
-               (expr);                         \
-                __asm __volatile(              \
-                       "msr  cpsr_fsxc, %0"    \
-                       : /* no output */       \
-                       : "r" (cpsr_save)       \
-                       : "cc" );               \
-       } while(0)
-
-static __inline uint32_t
-__swp(uint32_t val, volatile uint32_t *ptr)
-{
-       __asm __volatile("swp   %0, %2, [%3]"
-           : "=&r" (val), "=m" (*ptr)
-           : "r" (val), "r" (ptr), "m" (*ptr)
-           : "memory");
-       return (val);
-}
-
-
-#ifdef _KERNEL
-#define        ARM_HAVE_ATOMIC64
-
-static __inline void
-atomic_set_32(volatile uint32_t *address, uint32_t setmask)
-{
-       __with_interrupts_disabled(*address |= setmask);
-}
-
-static __inline void
-atomic_set_64(volatile uint64_t *address, uint64_t setmask)
-{
-       __with_interrupts_disabled(*address |= setmask);
-}
-
-static __inline void
-atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
-{
-       __with_interrupts_disabled(*address &= ~clearmask);
-}
-
-static __inline void
-atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
-{
-       __with_interrupts_disabled(*address &= ~clearmask);
-}
-
-static __inline u_int32_t
-atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
-{
-       int ret;
-       
-       __with_interrupts_disabled(
-        {
-               if (*p == cmpval) {
-                       *p = newval;
-                       ret = 1;
-               } else {
-                       ret = 0;
-               }
-       });
-       return (ret);
-}
-
-static __inline u_int64_t
-atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
-{
-       int ret;
-       
-       __with_interrupts_disabled(
-        {
-               if (*p == cmpval) {
-                       *p = newval;
-                       ret = 1;
-               } else {
-                       ret = 0;
-               }
-       });
-       return (ret);
-}
-
-static __inline void
-atomic_add_32(volatile u_int32_t *p, u_int32_t val)
-{
-       __with_interrupts_disabled(*p += val);
-}
-
-static __inline void
-atomic_add_64(volatile u_int64_t *p, u_int64_t val)
-{
-       __with_interrupts_disabled(*p += val);
-}
-
-static __inline void
-atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
-{
-       __with_interrupts_disabled(*p -= val);
-}
-
-static __inline void
-atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
-{
-       __with_interrupts_disabled(*p -= val);
-}
-
-static __inline uint32_t
-atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
-{
-       uint32_t value;
-
-       __with_interrupts_disabled(
-       {
-               value = *p;
-               *p += v;
-       });
-       return (value);
-}
-
-static __inline uint64_t
-atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
-{
-       uint64_t value;
-
-       __with_interrupts_disabled(
-       {
-               value = *p;
-               *p += v;
-       });
-       return (value);
-}
-
-static __inline uint64_t
-atomic_load_64(volatile uint64_t *p)
-{
-       uint64_t value;
-
-       __with_interrupts_disabled(value = *p);
-       return (value);
-}
-
-static __inline void
-atomic_store_64(volatile uint64_t *p, uint64_t value)
-{
-       __with_interrupts_disabled(*p = value);
-}
-
-#else /* !_KERNEL */
-
-static __inline u_int32_t
-atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
-{
-       register int done, ras_start = ARM_RAS_START;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
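
For reference on how these primitives are consumed, here is a short usage sketch
written against the atomic(9) interfaces defined in these headers. The myref and
mylock names are invented for illustration; only the atomic_* calls come from the
API shown above.

  #include <sys/types.h>
  #include <machine/atomic.h>

  /* Hypothetical reference count built on the 32-bit atomics. */
  struct myref {
          volatile uint32_t cnt;
  };

  static __inline void
  myref_acquire(struct myref *r)
  {
          /* No ordering needed; the caller already holds a reference. */
          atomic_add_32(&r->cnt, 1);
  }

  static __inline int
  myref_release(struct myref *r)
  {
          /* Make prior stores visible before the count can reach zero. */
          atomic_thread_fence_rel();
          if (atomic_fetchadd_32(&r->cnt, -1) == 1) {
                  atomic_thread_fence_acq();
                  return (1);             /* last reference dropped */
          }
          return (0);
  }

  /* Hypothetical try-lock using compare-and-set with acquire/release. */
  static __inline int
  mylock_try(volatile uint32_t *lk)
  {
          /* Succeeds only if the lock word changes from 0 to 1. */
          return (atomic_cmpset_acq_32(lk, 0, 1));
  }

  static __inline void
  mylock_unlock(volatile uint32_t *lk)
  {
          /* Release ordering: critical-section stores happen before the clear. */
          atomic_store_rel_32(lk, 0);
  }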