This patch splits the atomic operations out of the common DPDK code and
moves them into architecture-specific arch directories, so that support
for other processor architectures can be added to DPDK more easily.

Signed-off-by: Chao Zhu <bjzhuc at cn.ibm.com>
---
 lib/librte_eal/common/Makefile                     |    2 +-
 .../common/include/i686/arch/rte_atomic_arch.h     |  378 ++++++++++++++++++++
 lib/librte_eal/common/include/rte_atomic.h         |  172 +--------
 .../common/include/x86_64/arch/rte_atomic_arch.h   |  378 ++++++++++++++++++++
 4 files changed, 772 insertions(+), 158 deletions(-)
 create mode 100644 lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
 create mode 100644 lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
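
Note (illustrative only, not part of this patch): with this split, adding a
new processor architecture mainly means providing its own rte_atomic_arch.h
under include/<arch>/arch/. A minimal sketch of such a header fragment,
assuming the new architecture can rely on the GCC __sync builtins instead of
hand-written assembly, could look like the following (everything except the
rte_arch_* hooks introduced by this patch is hypothetical):

    /* hypothetical include/<new_arch>/arch/rte_atomic_arch.h fragment */
    #define rte_arch_mb()  __sync_synchronize()  /* full barrier via builtin */
    #define rte_arch_wmb() __sync_synchronize()
    #define rte_arch_rmb() __sync_synchronize()

    static inline int
    rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
    {
            /* compare-and-swap through the compiler builtin */
            return __sync_bool_compare_and_swap(dst, exp, src);
    }

    static inline void
    rte_arch_atomic32_inc(rte_atomic32_t *v)
    {
            /* atomic increment through the compiler builtin */
            __sync_fetch_and_add(&v->cnt, 1);
    }

This mirrors the RTE_FORCE_INTRINSICS branches already present in the x86
headers below; rte_atomic32_t would be defined earlier in the same header.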

diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index 7f27966..d730de5 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -46,7 +46,7 @@ ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
 INC += rte_warnings.h
 endif

-ARCH_INC := rte_atomic.h
+ARCH_INC := rte_atomic.h rte_atomic_arch.h

 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/arch := \
diff --git a/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h b/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
new file mode 100644
index 0000000..cb2d91d
--- /dev/null
+++ b/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
@@ -0,0 +1,378 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_ATOMIC_ARCH_H_
+#define _RTE_ATOMIC_ARCH_H_
+
+#include <stdint.h>
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED                        /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define        rte_arch_mb() _mm_mfence()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define        rte_arch_wmb() _mm_sfence()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define        rte_arch_rmb() _mm_lfence()
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define        rte_arch_compiler_barrier() do {                \
+       asm volatile ("" : : : "memory");       \
+} while(0)
+
+#include <emmintrin.h>
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 16-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgw %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+#else
+       return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_inc(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic16_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_dec(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "decw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic16_sub(v, 1);
+#endif
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 32-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgl %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+#else
+       return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_inc(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic32_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_dec(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "decl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic32_sub(v,1);
+#endif
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+#endif /* _RTE_ATOMIC_ARCH_H_ */
+
diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h
index a5b6eec..24ba5d0 100644
--- a/lib/librte_eal/common/include/rte_atomic.h
+++ b/lib/librte_eal/common/include/rte_atomic.h
@@ -49,13 +49,7 @@
 extern "C" {
 #endif

-#include <stdint.h>
-
-#if RTE_MAX_LCORE == 1
-#define MPLOCKED                        /**< No need to insert MP lock prefix. */
-#else
-#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
-#endif
+#include "arch/rte_atomic_arch.h"

 /**
  * General memory barrier.
@@ -63,7 +57,7 @@ extern "C" {
  * Guarantees that the LOAD and STORE operations generated before the
  * barrier occur before the LOAD and STORE operations generated after.
  */
-#define        rte_mb() _mm_mfence()
+#define        rte_mb() rte_arch_mb()

 /**
  * Write memory barrier.
@@ -71,7 +65,7 @@ extern "C" {
  * Guarantees that the STORE operations generated before the barrier
  * occur before the STORE operations generated after.
  */
-#define        rte_wmb() _mm_sfence()
+#define        rte_wmb() rte_arch_wmb()

 /**
  * Read memory barrier.
@@ -79,7 +73,7 @@ extern "C" {
  * Guarantees that the LOAD operations generated before the barrier
  * occur before the LOAD operations generated after.
  */
-#define        rte_rmb() _mm_lfence()
+#define        rte_rmb() rte_arch_rmb()

 /**
  * Compiler barrier.
@@ -87,11 +81,7 @@ extern "C" {
  * Guarantees that operation reordering does not occur at compile time
  * for operations directly before and after the barrier.
  */
-#define        rte_compiler_barrier() do {             \
-       asm volatile ("" : : : "memory");       \
-} while(0)
-
-#include <emmintrin.h>
+#define        rte_compiler_barrier() rte_arch_compiler_barrier()

 /**
  * @file
@@ -119,33 +109,10 @@ extern "C" {
 static inline int
 rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t res;
-
-       asm volatile(
-                       MPLOCKED
-                       "cmpxchgw %[src], %[dst];"
-                       "sete %[res];"
-                       : [res] "=a" (res),     /* output */
-                         [dst] "=m" (*dst)
-                       : [src] "r" (src),      /* input */
-                         "a" (exp),
-                         "m" (*dst)
-                       : "memory");            /* no-clobber list */
-       return res;
-#else
-       return __sync_bool_compare_and_swap(dst, exp, src);
-#endif
+       return rte_arch_atomic16_cmpset(dst, exp, src);
 }

 /**
- * The atomic counter structure.
- */
-typedef struct {
-       volatile int16_t cnt; /**< An internal counter value. */
-} rte_atomic16_t;
-
-/**
  * Static initializer for an atomic counter.
  */
 #define RTE_ATOMIC16_INIT(val) { (val) }
@@ -227,16 +194,7 @@ rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 static inline void
 rte_atomic16_inc(rte_atomic16_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       asm volatile(
-                       MPLOCKED
-                       "incw %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-#else
-       rte_atomic16_add(v, 1);
-#endif
+       rte_arch_atomic16_inc(v);
 }

 /**
@@ -248,16 +206,7 @@ rte_atomic16_inc(rte_atomic16_t *v)
 static inline void
 rte_atomic16_dec(rte_atomic16_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       asm volatile(
-                       MPLOCKED
-                       "decw %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-#else
-       rte_atomic16_sub(v, 1);
-#endif
+       rte_arch_atomic16_dec(v);
 }

 /**
@@ -312,20 +261,7 @@ rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
  */
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "incw %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return (ret != 0);
-#else
-       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
-#endif
+       return rte_arch_atomic16_inc_and_test(v);
 }

 /**
@@ -341,19 +277,7 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
  */
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t ret;
-
-       asm volatile(MPLOCKED
-                       "decw %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return (ret != 0);
-#else
-       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
-#endif
+       return rte_arch_atomic16_dec_and_test(v);
 }

 /**
@@ -404,33 +328,10 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int
 rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t res;
-
-       asm volatile(
-                       MPLOCKED
-                       "cmpxchgl %[src], %[dst];"
-                       "sete %[res];"
-                       : [res] "=a" (res),     /* output */
-                         [dst] "=m" (*dst)
-                       : [src] "r" (src),      /* input */
-                         "a" (exp),
-                         "m" (*dst)
-                       : "memory");            /* no-clobber list */
-       return res;
-#else
-       return __sync_bool_compare_and_swap(dst, exp, src);
-#endif
+       return rte_arch_atomic32_cmpset(dst, exp, src);
 }

 /**
- * The atomic counter structure.
- */
-typedef struct {
-       volatile int32_t cnt; /**< An internal counter value. */
-} rte_atomic32_t;
-
-/**
  * Static initializer for an atomic counter.
  */
 #define RTE_ATOMIC32_INIT(val) { (val) }
@@ -512,16 +413,7 @@ rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 static inline void
 rte_atomic32_inc(rte_atomic32_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       asm volatile(
-                       MPLOCKED
-                       "incl %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-#else
-       rte_atomic32_add(v, 1);
-#endif
+       rte_arch_atomic32_inc(v);
 }

 /**
@@ -533,16 +425,7 @@ rte_atomic32_inc(rte_atomic32_t *v)
 static inline void
 rte_atomic32_dec(rte_atomic32_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       asm volatile(
-                       MPLOCKED
-                       "decl %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-#else
-       rte_atomic32_sub(v,1);
-#endif
+       rte_arch_atomic32_dec(v);
 }

 /**
@@ -597,20 +480,7 @@ rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
  */
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "incl %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return (ret != 0);
-#else
-       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
-#endif
+       return rte_arch_atomic32_inc_and_test(v);
 }

 /**
@@ -626,19 +496,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
  */
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-#ifndef RTE_FORCE_INTRINSICS
-       uint8_t ret;
-
-       asm volatile(MPLOCKED
-                       "decl %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return (ret != 0);
-#else
-       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
-#endif
+       return rte_arch_atomic32_dec_and_test(v);
 }

 /**
diff --git a/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h b/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
new file mode 100644
index 0000000..cb2d91d
--- /dev/null
+++ b/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
@@ -0,0 +1,378 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_ATOMIC_ARCH_H_
+#define _RTE_ATOMIC_ARCH_H_
+
+#include <stdint.h>
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED                        /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define        rte_arch_mb() _mm_mfence()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define        rte_arch_wmb() _mm_sfence()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define        rte_arch_rmb() _mm_lfence()
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define        rte_arch_compiler_barrier() do {                \
+       asm volatile ("" : : : "memory");       \
+} while(0)
+
+#include <emmintrin.h>
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 16-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgw %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+#else
+       return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_inc(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic16_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_dec(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "decw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic16_sub(v, 1);
+#endif
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 32-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgl %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+#else
+       return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_inc(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic32_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_dec(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       asm volatile(
+                       MPLOCKED
+                       "decl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+#else
+       rte_atomic32_sub(v,1);
+#endif
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+#else
+       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+#endif /* _RTE_ATOMIC_ARCH_H_ */
+
-- 
1.7.1
