Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API from rte_stdatomic.h.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
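Note for reviewers: the conversion follows the same mechanical pattern in
every file touched below. A minimal, self-contained sketch of that pattern
follows; the variable and function names are made up for illustration and
are not part of this patch.

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* Variables accessed through the wrappers are marked with RTE_ATOMIC()
     * so the same code builds whether the wrappers map to the gcc __atomic
     * builtins or to C11 stdatomic, depending on the stdatomic build option.
     */
    static RTE_ATOMIC(uint32_t) example_counter;

    static inline uint32_t
    example_bump(void)
    {
            /* was: __atomic_fetch_add(&example_counter, 1, __ATOMIC_RELAXED); */
            rte_atomic_fetch_add_explicit(&example_counter, 1,
                    rte_memory_order_relaxed);

            /* was: __atomic_load_n(&example_counter, __ATOMIC_ACQUIRE); */
            return rte_atomic_load_explicit(&example_counter,
                    rte_memory_order_acquire);
    }
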
 lib/eal/common/eal_common_launch.c    |  10 +--
 lib/eal/common/eal_common_mcfg.c      |   2 +-
 lib/eal/common/eal_common_proc.c      |  14 ++--
 lib/eal/common/eal_common_thread.c    |  26 ++++----
 lib/eal/common/eal_common_trace.c     |   8 +--
 lib/eal/common/eal_common_trace_ctf.c |   4 +-
 lib/eal/common/eal_memcfg.h           |   2 +-
 lib/eal/common/eal_private.h          |   4 +-
 lib/eal/common/eal_trace.h            |   4 +-
 lib/eal/common/rte_service.c          | 122 +++++++++++++++++-----------------
 lib/eal/freebsd/eal.c                 |  20 +++---
 lib/eal/include/rte_epoll.h           |   3 +-
 lib/eal/linux/eal.c                   |  26 ++++----
 lib/eal/linux/eal_interrupts.c        |  42 ++++++------
 lib/eal/ppc/include/rte_atomic.h      |   6 +-
 lib/eal/windows/rte_thread.c          |   8 ++-
 16 files changed, 152 insertions(+), 149 deletions(-)

diff --git a/lib/eal/common/eal_common_launch.c b/lib/eal/common/eal_common_launch.c
index 0504598..5320c3b 100644
--- a/lib/eal/common/eal_common_launch.c
+++ b/lib/eal/common/eal_common_launch.c
@@ -18,8 +18,8 @@
 int
 rte_eal_wait_lcore(unsigned worker_id)
 {
-       while (__atomic_load_n(&lcore_config[worker_id].state,
-                       __ATOMIC_ACQUIRE) != WAIT)
+       while (rte_atomic_load_explicit(&lcore_config[worker_id].state,
+                       rte_memory_order_acquire) != WAIT)
                rte_pause();
 
        return lcore_config[worker_id].ret;
@@ -38,8 +38,8 @@
        /* Check if the worker is in 'WAIT' state. Use acquire order
         * since 'state' variable is used as the guard variable.
         */
-       if (__atomic_load_n(&lcore_config[worker_id].state,
-                       __ATOMIC_ACQUIRE) != WAIT)
+       if (rte_atomic_load_explicit(&lcore_config[worker_id].state,
+                       rte_memory_order_acquire) != WAIT)
                goto finish;
 
        lcore_config[worker_id].arg = arg;
@@ -47,7 +47,7 @@
         * before the worker thread starts running the function.
         * Use worker thread function as the guard variable.
         */
-       __atomic_store_n(&lcore_config[worker_id].f, f, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&lcore_config[worker_id].f, f, rte_memory_order_release);
 
        rc = eal_thread_wake_worker(worker_id);
 
diff --git a/lib/eal/common/eal_common_mcfg.c b/lib/eal/common/eal_common_mcfg.c
index 2a785e7..dabb80e 100644
--- a/lib/eal/common/eal_common_mcfg.c
+++ b/lib/eal/common/eal_common_mcfg.c
@@ -30,7 +30,7 @@
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 
        /* wait until shared mem_config finish initialising */
-       rte_wait_until_equal_32(&mcfg->magic, RTE_MAGIC, __ATOMIC_RELAXED);
+       rte_wait_until_equal_32(&mcfg->magic, RTE_MAGIC, rte_memory_order_relaxed);
 }
 
 int
diff --git a/lib/eal/common/eal_common_proc.c b/lib/eal/common/eal_common_proc.c
index f20a348..728815c 100644
--- a/lib/eal/common/eal_common_proc.c
+++ b/lib/eal/common/eal_common_proc.c
@@ -33,7 +33,7 @@
 #include "eal_filesystem.h"
 #include "eal_internal_cfg.h"
 
-static int mp_fd = -1;
+static RTE_ATOMIC(int) mp_fd = -1;
 static rte_thread_t mp_handle_tid;
 static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
 static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
@@ -404,7 +404,7 @@ struct pending_request {
        struct sockaddr_un sa;
        int fd;
 
-       while ((fd = __atomic_load_n(&mp_fd, __ATOMIC_RELAXED)) >= 0) {
+       while ((fd = rte_atomic_load_explicit(&mp_fd, rte_memory_order_relaxed)) >= 0) {
                int ret;
 
                ret = read_msg(fd, &msg, &sa);
@@ -652,7 +652,7 @@ enum async_action {
                RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
                        strerror(errno));
                close(dir_fd);
-               close(__atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED));
+               close(rte_atomic_exchange_explicit(&mp_fd, -1, rte_memory_order_relaxed));
                return -1;
        }
 
@@ -668,7 +668,7 @@ enum async_action {
 {
        int fd;
 
-       fd = __atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED);
+       fd = rte_atomic_exchange_explicit(&mp_fd, -1, rte_memory_order_relaxed);
        if (fd < 0)
                return;
 
@@ -1282,11 +1282,11 @@ enum mp_status {
 
        expected = MP_STATUS_UNKNOWN;
        desired = status;
-       if (__atomic_compare_exchange_n(&mcfg->mp_status, &expected, desired,
-                       false, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+       if (rte_atomic_compare_exchange_strong_explicit(&mcfg->mp_status, &expected, desired,
+                       rte_memory_order_relaxed, rte_memory_order_relaxed))
                return true;
 
-       return __atomic_load_n(&mcfg->mp_status, __ATOMIC_RELAXED) == desired;
+       return rte_atomic_load_explicit(&mcfg->mp_status, rte_memory_order_relaxed) == desired;
 }
 
 bool
diff --git a/lib/eal/common/eal_common_thread.c b/lib/eal/common/eal_common_thread.c
index 668b1ed..c422ea8 100644
--- a/lib/eal/common/eal_common_thread.c
+++ b/lib/eal/common/eal_common_thread.c
@@ -191,8 +191,8 @@ unsigned rte_socket_id(void)
                /* Set the state to 'RUNNING'. Use release order
                 * since 'state' variable is used as the guard variable.
                 */
-               __atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&lcore_config[lcore_id].state, RUNNING,
+                       rte_memory_order_release);
 
                eal_thread_ack_command();
 
@@ -201,8 +201,8 @@ unsigned rte_socket_id(void)
                 * are accessed only after update to 'f' is visible.
                 * Wait till the update to 'f' is visible to the worker.
                 */
-               while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
-                               __ATOMIC_ACQUIRE)) == NULL)
+               while ((f = rte_atomic_load_explicit(&lcore_config[lcore_id].f,
+                               rte_memory_order_acquire)) == NULL)
                        rte_pause();
 
                rte_eal_trace_thread_lcore_running(lcore_id, f);
@@ -219,8 +219,8 @@ unsigned rte_socket_id(void)
                 * are completed before the state is updated.
                 * Use 'state' as the guard variable.
                 */
-               __atomic_store_n(&lcore_config[lcore_id].state, WAIT,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&lcore_config[lcore_id].state, WAIT,
+                       rte_memory_order_release);
 
                rte_eal_trace_thread_lcore_stopped(lcore_id);
        }
@@ -242,7 +242,7 @@ struct control_thread_params {
        /* Control thread status.
         * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
         */
-       enum __rte_ctrl_thread_status status;
+       RTE_ATOMIC(enum __rte_ctrl_thread_status) status;
 };
 
 static int control_thread_init(void *arg)
@@ -259,13 +259,13 @@ static int control_thread_init(void *arg)
        RTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY;
        params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);
        if (params->ret != 0) {
-               __atomic_store_n(&params->status,
-                       CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&params->status,
+                       CTRL_THREAD_ERROR, rte_memory_order_release);
                return 1;
        }
 
-       __atomic_store_n(&params->status,
-               CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&params->status,
+               CTRL_THREAD_RUNNING, rte_memory_order_release);
 
        return 0;
 }
@@ -310,8 +310,8 @@ static uint32_t control_thread_start(void *arg)
 
        /* Wait for the control thread to initialize successfully */
        while ((ctrl_thread_status =
-                       __atomic_load_n(&params->status,
-                       __ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
+                       rte_atomic_load_explicit(&params->status,
+                       rte_memory_order_acquire)) == CTRL_THREAD_LAUNCHING) {
                rte_delay_us_sleep(1);
        }
 
diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c
index d2eac2d..6ad87fc 100644
--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c
@@ -97,7 +97,7 @@ struct trace_point_head *
 bool
 rte_trace_is_enabled(void)
 {
-       return __atomic_load_n(&trace.status, __ATOMIC_ACQUIRE) != 0;
+       return rte_atomic_load_explicit(&trace.status, rte_memory_order_acquire) != 0;
 }
 
 static void
@@ -157,7 +157,7 @@ rte_trace_mode rte_trace_mode_get(void)
        prev = rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_MASK,
                rte_memory_order_release);
        if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)
-               __atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE);
+               rte_atomic_fetch_add_explicit(&trace.status, 1, rte_memory_order_release);
        return 0;
 }
 
@@ -172,7 +172,7 @@ rte_trace_mode rte_trace_mode_get(void)
        prev = rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                rte_memory_order_release);
        if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)
-               __atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE);
+               rte_atomic_fetch_sub_explicit(&trace.status, 1, rte_memory_order_release);
        return 0;
 }
 
@@ -526,7 +526,7 @@ rte_trace_mode rte_trace_mode_get(void)
 
        /* Add the trace point at tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
-       __atomic_thread_fence(__ATOMIC_RELEASE);
+       __atomic_thread_fence(rte_memory_order_release);
 
        /* All Good !!! */
        return 0;
diff --git a/lib/eal/common/eal_common_trace_ctf.c b/lib/eal/common/eal_common_trace_ctf.c
index c6775c3..04c4f71 100644
--- a/lib/eal/common/eal_common_trace_ctf.c
+++ b/lib/eal/common/eal_common_trace_ctf.c
@@ -361,10 +361,10 @@
        if (ctf_meta == NULL)
                return -EINVAL;
 
-       if (!__atomic_load_n(&trace->ctf_fixup_done, __ATOMIC_SEQ_CST) &&
+       if (!rte_atomic_load_explicit(&trace->ctf_fixup_done, rte_memory_order_seq_cst) &&
                                rte_get_timer_hz()) {
                meta_fixup(trace, ctf_meta);
-               __atomic_store_n(&trace->ctf_fixup_done, 1, __ATOMIC_SEQ_CST);
+               rte_atomic_store_explicit(&trace->ctf_fixup_done, 1, rte_memory_order_seq_cst);
        }
 
        rc = fprintf(f, "%s", ctf_meta);
diff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h
index d5c63e2..60e2089 100644
--- a/lib/eal/common/eal_memcfg.h
+++ b/lib/eal/common/eal_memcfg.h
@@ -42,7 +42,7 @@ struct rte_mem_config {
        rte_rwlock_t memory_hotplug_lock;
        /**< Indicates whether memory hotplug request is in progress. */
 
-       uint8_t mp_status; /**< Multiprocess status. */
+       RTE_ATOMIC(uint8_t) mp_status; /**< Multiprocess status. */
 
        /* memory segments and zones */
        struct rte_fbarray memzones; /**< Memzone descriptors. */
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index ebd496b..4d2e806 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -24,11 +24,11 @@ struct lcore_config {
        int pipe_main2worker[2];   /**< communication pipe with main */
        int pipe_worker2main[2];   /**< communication pipe with main */
 
-       lcore_function_t * volatile f; /**< function to call */
+       RTE_ATOMIC(lcore_function_t *) volatile f; /**< function to call */
        void * volatile arg;       /**< argument of function */
        volatile int ret;          /**< return value of function */
 
-       volatile enum rte_lcore_state_t state; /**< lcore state */
+       volatile RTE_ATOMIC(enum rte_lcore_state_t) state; /**< lcore state */
        unsigned int socket_id;    /**< physical socket id for this lcore */
        unsigned int core_id;      /**< core number on socket for this lcore */
        int core_index;            /**< relative index, starting from 0 */
diff --git a/lib/eal/common/eal_trace.h b/lib/eal/common/eal_trace.h
index d66bcfe..ace2ef3 100644
--- a/lib/eal/common/eal_trace.h
+++ b/lib/eal/common/eal_trace.h
@@ -50,7 +50,7 @@ struct trace_arg {
 struct trace {
        char *dir;
        int register_errno;
-       uint32_t status;
+       RTE_ATOMIC(uint32_t) status;
        enum rte_trace_mode mode;
        rte_uuid_t uuid;
        uint32_t buff_len;
@@ -65,7 +65,7 @@ struct trace {
        uint32_t ctf_meta_offset_freq;
        uint32_t ctf_meta_offset_freq_off_s;
        uint32_t ctf_meta_offset_freq_off;
-       uint16_t ctf_fixup_done;
+       RTE_ATOMIC(uint16_t) ctf_fixup_done;
        rte_spinlock_t lock;
 };
 
diff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c
index 9e2aa4a..3fc2b9a 100644
--- a/lib/eal/common/rte_service.c
+++ b/lib/eal/common/rte_service.c
@@ -43,8 +43,8 @@ struct rte_service_spec_impl {
        rte_spinlock_t execute_lock;
 
        /* API set/get-able variables */
-       int8_t app_runstate;
-       int8_t comp_runstate;
+       RTE_ATOMIC(int8_t) app_runstate;
+       RTE_ATOMIC(int8_t) comp_runstate;
        uint8_t internal_flags;
 
        /* per service statistics */
@@ -52,24 +52,24 @@ struct rte_service_spec_impl {
         * It does not indicate the number of cores the service is running
         * on currently.
         */
-       uint32_t num_mapped_cores;
+       RTE_ATOMIC(uint32_t) num_mapped_cores;
 } __rte_cache_aligned;
 
 struct service_stats {
-       uint64_t calls;
-       uint64_t cycles;
+       RTE_ATOMIC(uint64_t) calls;
+       RTE_ATOMIC(uint64_t) cycles;
 };
 
 /* the internal values of a service core */
 struct core_state {
        /* map of services IDs are run on this core */
        uint64_t service_mask;
-       uint8_t runstate; /* running or stopped */
-       uint8_t thread_active; /* indicates when thread is in service_run() */
+       RTE_ATOMIC(uint8_t) runstate; /* running or stopped */
+       RTE_ATOMIC(uint8_t) thread_active; /* indicates when thread is in service_run() */
        uint8_t is_service_core; /* set if core is currently a service core */
        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
-       uint64_t loops;
-       uint64_t cycles;
+       RTE_ATOMIC(uint64_t) loops;
+       RTE_ATOMIC(uint64_t) cycles;
        struct service_stats service_stats[RTE_SERVICE_NUM_MAX];
 } __rte_cache_aligned;
 
@@ -314,11 +314,11 @@ struct core_state {
         * service_run and service_runstate_get function.
         */
        if (runstate)
-               __atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_RUNNING,
+                       rte_memory_order_release);
        else
-               __atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&s->comp_runstate, RUNSTATE_STOPPED,
+                       rte_memory_order_release);
 
        return 0;
 }
@@ -334,11 +334,11 @@ struct core_state {
         * service_run runstate_get function.
         */
        if (runstate)
-               __atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_RUNNING,
+                       rte_memory_order_release);
        else
-               __atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
-                       __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&s->app_runstate, RUNSTATE_STOPPED,
+                       rte_memory_order_release);
 
        rte_eal_trace_service_runstate_set(id, runstate);
        return 0;
@@ -354,14 +354,14 @@ struct core_state {
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
-       if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
+       if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) ==
                        RUNSTATE_RUNNING &&
-           __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
+           rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) ==
                        RUNSTATE_RUNNING) {
                int check_disabled = !(s->internal_flags &
                        SERVICE_F_START_CHECK);
-               int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
-                       __ATOMIC_RELAXED) > 0);
+               int lcore_mapped = (rte_atomic_load_explicit(&s->num_mapped_cores,
+                       rte_memory_order_relaxed) > 0);
 
                return (check_disabled | lcore_mapped);
        } else
@@ -392,15 +392,15 @@ struct core_state {
                        uint64_t end = rte_rdtsc();
                        uint64_t cycles = end - start;
 
-                       __atomic_store_n(&cs->cycles, cs->cycles + cycles,
-                               __ATOMIC_RELAXED);
-                       __atomic_store_n(&service_stats->cycles,
+                       rte_atomic_store_explicit(&cs->cycles, cs->cycles + cycles,
+                               rte_memory_order_relaxed);
+                       rte_atomic_store_explicit(&service_stats->cycles,
                                service_stats->cycles + cycles,
-                               __ATOMIC_RELAXED);
+                               rte_memory_order_relaxed);
                }
 
-               __atomic_store_n(&service_stats->calls,
-                       service_stats->calls + 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&service_stats->calls,
+                       service_stats->calls + 1, rte_memory_order_relaxed);
        } else {
                s->spec.callback(userdata);
        }
@@ -420,9 +420,9 @@ struct core_state {
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
-       if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
+       if (rte_atomic_load_explicit(&s->comp_runstate, rte_memory_order_acquire) !=
                        RUNSTATE_RUNNING ||
-           __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
+           rte_atomic_load_explicit(&s->app_runstate, rte_memory_order_acquire) !=
                        RUNSTATE_RUNNING ||
            !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
@@ -472,11 +472,11 @@ struct core_state {
        /* Increment num_mapped_cores to reflect that this core is
         * now mapped capable of running the service.
         */
-       __atomic_fetch_add(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
 
        int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
 
-       __atomic_fetch_sub(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&s->num_mapped_cores, 1, rte_memory_order_relaxed);
 
        return ret;
 }
@@ -489,13 +489,13 @@ struct core_state {
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];
 
-       __atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);
+       rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst);
 
        /* runstate act as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
-       while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
+       while (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
                        RUNSTATE_RUNNING) {
 
                const uint64_t service_mask = cs->service_mask;
@@ -513,7 +513,7 @@ struct core_state {
                        service_run(i, cs, service_mask, service_get(i), 1);
                }
 
-               __atomic_store_n(&cs->loops, cs->loops + 1, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&cs->loops, cs->loops + 1, rte_memory_order_relaxed);
        }
 
        /* Switch off this core for all services, to ensure that future
@@ -526,7 +526,7 @@ struct core_state {
         * this store, ensuring that once this store is visible, the service
         * lcore thread really is done in service cores code.
         */
-       __atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
+       rte_atomic_store_explicit(&cs->thread_active, 0, rte_memory_order_seq_cst);
        return 0;
 }
 
@@ -539,8 +539,8 @@ struct core_state {
        /* Load thread_active using ACQUIRE to avoid instructions dependent on
         * the result being re-ordered before this load completes.
         */
-       return __atomic_load_n(&lcore_states[lcore].thread_active,
-                              __ATOMIC_ACQUIRE);
+       return rte_atomic_load_explicit(&lcore_states[lcore].thread_active,
+                              rte_memory_order_acquire);
 }
 
 int32_t
@@ -646,13 +646,13 @@ struct core_state {
 
                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
-                       __atomic_fetch_add(&rte_services[sid].num_mapped_cores,
-                               1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores,
+                               1, rte_memory_order_relaxed);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
-                       __atomic_fetch_sub(&rte_services[sid].num_mapped_cores,
-                               1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores,
+                               1, rte_memory_order_relaxed);
                }
        }
 
@@ -709,13 +709,13 @@ struct core_state {
                         * store-release memory order here to synchronize
                         * with load-acquire in runstate read functions.
                         */
-                       __atomic_store_n(&lcore_states[i].runstate,
-                               RUNSTATE_STOPPED, __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&lcore_states[i].runstate,
+                               RUNSTATE_STOPPED, rte_memory_order_release);
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
-               __atomic_store_n(&rte_services[i].num_mapped_cores, 0,
-                       __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&rte_services[i].num_mapped_cores, 0,
+                       rte_memory_order_relaxed);
 
        return 0;
 }
@@ -735,8 +735,8 @@ struct core_state {
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
-       __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
-               __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+               rte_memory_order_release);
 
        return rte_eal_wait_lcore(lcore);
 }
@@ -755,7 +755,7 @@ struct core_state {
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
-       if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
+       if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) !=
                        RUNSTATE_STOPPED)
                return -EBUSY;
 
@@ -779,7 +779,7 @@ struct core_state {
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
-       if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
+       if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) ==
                        RUNSTATE_RUNNING)
                return -EALREADY;
 
@@ -789,7 +789,7 @@ struct core_state {
        /* Use load-acquire memory order here to synchronize with
         * store-release in runstate update functions.
         */
-       __atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&cs->runstate, RUNSTATE_RUNNING, rte_memory_order_release);
 
        rte_eal_trace_service_lcore_start(lcore);
 
@@ -808,7 +808,7 @@ struct core_state {
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
-       if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
+       if (rte_atomic_load_explicit(&lcore_states[lcore].runstate, rte_memory_order_acquire) ==
                        RUNSTATE_STOPPED)
                return -EALREADY;
 
@@ -820,8 +820,8 @@ struct core_state {
                int32_t enabled = service_mask & (UINT64_C(1) << i);
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
-                       __atomic_load_n(&rte_services[i].num_mapped_cores,
-                               __ATOMIC_RELAXED));
+                       rte_atomic_load_explicit(&rte_services[i].num_mapped_cores,
+                               rte_memory_order_relaxed));
 
                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
@@ -834,8 +834,8 @@ struct core_state {
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
-       __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
-               __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+               rte_memory_order_release);
 
        rte_eal_trace_service_lcore_stop(lcore);
 
@@ -847,7 +847,7 @@ struct core_state {
 {
        struct core_state *cs = &lcore_states[lcore];
 
-       return __atomic_load_n(&cs->loops, __ATOMIC_RELAXED);
+       return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed);
 }
 
 static uint64_t
@@ -855,7 +855,7 @@ struct core_state {
 {
        struct core_state *cs = &lcore_states[lcore];
 
-       return __atomic_load_n(&cs->cycles, __ATOMIC_RELAXED);
+       return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed);
 }
 
 static uint64_t
@@ -863,8 +863,8 @@ struct core_state {
 {
        struct core_state *cs = &lcore_states[lcore];
 
-       return __atomic_load_n(&cs->service_stats[service_id].calls,
-               __ATOMIC_RELAXED);
+       return rte_atomic_load_explicit(&cs->service_stats[service_id].calls,
+               rte_memory_order_relaxed);
 }
 
 static uint64_t
@@ -872,8 +872,8 @@ struct core_state {
 {
        struct core_state *cs = &lcore_states[lcore];
 
-       return __atomic_load_n(&cs->service_stats[service_id].cycles,
-               __ATOMIC_RELAXED);
+       return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles,
+               rte_memory_order_relaxed);
 }
 
 typedef uint64_t (*lcore_attr_get_fun)(uint32_t service_id,
diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index 39a2868..568e06e 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -597,8 +597,8 @@ static void rte_eal_init_alert(const char *msg)
                return -1;
        }
 
-       if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
-                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,
+                                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                rte_eal_init_alert("already called initialization.");
                rte_errno = EALREADY;
                return -1;
@@ -622,7 +622,7 @@ static void rte_eal_init_alert(const char *msg)
        if (fctret < 0) {
                rte_eal_init_alert("Invalid 'command line' arguments.");
                rte_errno = EINVAL;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -636,20 +636,20 @@ static void rte_eal_init_alert(const char *msg)
        if (eal_plugins_init() < 0) {
                rte_eal_init_alert("Cannot init plugins");
                rte_errno = EINVAL;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
        if (eal_trace_init() < 0) {
                rte_eal_init_alert("Cannot init trace");
                rte_errno = EFAULT;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
        if (eal_option_device_parse()) {
                rte_errno = ENODEV;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -683,7 +683,7 @@ static void rte_eal_init_alert(const char *msg)
        if (rte_bus_scan()) {
                rte_eal_init_alert("Cannot scan the buses for devices");
                rte_errno = ENODEV;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -736,7 +736,7 @@ static void rte_eal_init_alert(const char *msg)
                if (ret < 0) {
                        rte_eal_init_alert("Cannot get hugepage information.");
                        rte_errno = EACCES;
-                       __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                        return -1;
                }
        }
@@ -915,8 +915,8 @@ static void rte_eal_init_alert(const char *msg)
        static uint32_t run_once;
        uint32_t has_run = 0;
 
-       if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
-                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,
+                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                RTE_LOG(WARNING, EAL, "Already called cleanup\n");
                rte_errno = EALREADY;
                return -1;
diff --git a/lib/eal/include/rte_epoll.h b/lib/eal/include/rte_epoll.h
index 01525f5..ae0cf20 100644
--- a/lib/eal/include/rte_epoll.h
+++ b/lib/eal/include/rte_epoll.h
@@ -13,6 +13,7 @@
 
 #include <stdint.h>
 
+#include <rte_stdatomic.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -38,7 +39,7 @@ enum {
 
 /** interrupt epoll event obj, taken by epoll_event.ptr */
 struct rte_epoll_event {
-       uint32_t status;           /**< OUT: event status */
+       RTE_ATOMIC(uint32_t) status;           /**< OUT: event status */
        int fd;                    /**< OUT: event fd */
        int epfd;       /**< OUT: epoll instance the ev associated with */
        struct rte_epoll_data epdata;
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 5f4b2fb..57da058 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -967,7 +967,7 @@ static void rte_eal_init_alert(const char *msg)
 rte_eal_init(int argc, char **argv)
 {
        int i, fctret, ret;
-       static uint32_t run_once;
+       static RTE_ATOMIC(uint32_t) run_once;
        uint32_t has_run = 0;
        char cpuset[RTE_CPU_AFFINITY_STR_LEN];
        char thread_name[RTE_THREAD_NAME_SIZE];
@@ -983,8 +983,8 @@ static void rte_eal_init_alert(const char *msg)
                return -1;
        }
 
-       if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
-                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,
+                                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                rte_eal_init_alert("already called initialization.");
                rte_errno = EALREADY;
                return -1;
@@ -1008,14 +1008,14 @@ static void rte_eal_init_alert(const char *msg)
        if (fctret < 0) {
                rte_eal_init_alert("Invalid 'command line' arguments.");
                rte_errno = EINVAL;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
        if (eal_plugins_init() < 0) {
                rte_eal_init_alert("Cannot init plugins");
                rte_errno = EINVAL;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -1027,7 +1027,7 @@ static void rte_eal_init_alert(const char *msg)
 
        if (eal_option_device_parse()) {
                rte_errno = ENODEV;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -1061,7 +1061,7 @@ static void rte_eal_init_alert(const char *msg)
        if (rte_bus_scan()) {
                rte_eal_init_alert("Cannot scan the buses for devices");
                rte_errno = ENODEV;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -1125,7 +1125,7 @@ static void rte_eal_init_alert(const char *msg)
                if (ret < 0) {
                        rte_eal_init_alert("Cannot get hugepage information.");
                        rte_errno = EACCES;
-                       __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                        return -1;
                }
        }
@@ -1150,7 +1150,7 @@ static void rte_eal_init_alert(const char *msg)
                         internal_conf->syslog_facility) < 0) {
                rte_eal_init_alert("Cannot init logging.");
                rte_errno = ENOMEM;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 
@@ -1158,7 +1158,7 @@ static void rte_eal_init_alert(const char *msg)
        if (rte_eal_vfio_setup() < 0) {
                rte_eal_init_alert("Cannot init VFIO");
                rte_errno = EAGAIN;
-               __atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed);
                return -1;
        }
 #endif
@@ -1345,11 +1345,11 @@ static void rte_eal_init_alert(const char *msg)
 int
 rte_eal_cleanup(void)
 {
-       static uint32_t run_once;
+       static RTE_ATOMIC(uint32_t) run_once;
        uint32_t has_run = 0;
 
-       if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
-                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&run_once, &has_run, 1,
+                                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                RTE_LOG(WARNING, EAL, "Already called cleanup\n");
                rte_errno = EALREADY;
                return -1;
diff --git a/lib/eal/linux/eal_interrupts.c b/lib/eal/linux/eal_interrupts.c
index 24fff3d..d4919df 100644
--- a/lib/eal/linux/eal_interrupts.c
+++ b/lib/eal/linux/eal_interrupts.c
@@ -1266,9 +1266,9 @@ struct rte_intr_source {
                 * ordering below acting as a lock to synchronize
                 * the event data updating.
                 */
-               if (!rev || !__atomic_compare_exchange_n(&rev->status,
-                                   &valid_status, RTE_EPOLL_EXEC, 0,
-                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+               if (!rev || !rte_atomic_compare_exchange_strong_explicit(&rev->status,
+                                   &valid_status, RTE_EPOLL_EXEC,
+                                   rte_memory_order_acquire, rte_memory_order_relaxed))
                        continue;
 
                events[count].status        = RTE_EPOLL_VALID;
@@ -1283,8 +1283,8 @@ struct rte_intr_source {
                /* the status update should be observed after
                 * the other fields change.
                 */
-               __atomic_store_n(&rev->status, RTE_EPOLL_VALID,
-                               __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&rev->status, RTE_EPOLL_VALID,
+                               rte_memory_order_release);
                count++;
        }
        return count;
@@ -1374,10 +1374,10 @@ struct rte_intr_source {
 {
        uint32_t valid_status = RTE_EPOLL_VALID;
 
-       while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
-                   RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-               while (__atomic_load_n(&ev->status,
-                               __ATOMIC_RELAXED) != RTE_EPOLL_VALID)
+       while (!rte_atomic_compare_exchange_strong_explicit(&ev->status, &valid_status,
+                   RTE_EPOLL_INVALID, rte_memory_order_acquire, rte_memory_order_relaxed)) {
+               while (rte_atomic_load_explicit(&ev->status,
+                               rte_memory_order_relaxed) != RTE_EPOLL_VALID)
                        rte_pause();
                valid_status = RTE_EPOLL_VALID;
        }
@@ -1402,8 +1402,8 @@ struct rte_intr_source {
                epfd = rte_intr_tls_epfd();
 
        if (op == EPOLL_CTL_ADD) {
-               __atomic_store_n(&event->status, RTE_EPOLL_VALID,
-                               __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&event->status, RTE_EPOLL_VALID,
+                               rte_memory_order_relaxed);
                event->fd = fd;  /* ignore fd in event */
                event->epfd = epfd;
                ev.data.ptr = (void *)event;
@@ -1415,13 +1415,13 @@ struct rte_intr_source {
                        op, fd, strerror(errno));
                if (op == EPOLL_CTL_ADD)
                        /* rollback status when CTL_ADD fail */
-                       __atomic_store_n(&event->status, RTE_EPOLL_INVALID,
-                                       __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&event->status, RTE_EPOLL_INVALID,
+                                       rte_memory_order_relaxed);
                return -1;
        }
 
-       if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
-                       __ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
+       if (op == EPOLL_CTL_DEL && rte_atomic_load_explicit(&event->status,
+                       rte_memory_order_relaxed) != RTE_EPOLL_INVALID)
                eal_epoll_data_safe_free(event);
 
        return 0;
@@ -1450,8 +1450,8 @@ struct rte_intr_source {
        case RTE_INTR_EVENT_ADD:
                epfd_op = EPOLL_CTL_ADD;
                rev = rte_intr_elist_index_get(intr_handle, efd_idx);
-               if (__atomic_load_n(&rev->status,
-                               __ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
+               if (rte_atomic_load_explicit(&rev->status,
+                               rte_memory_order_relaxed) != RTE_EPOLL_INVALID) {
                        RTE_LOG(INFO, EAL, "Event already been added.\n");
                        return -EEXIST;
                }
@@ -1474,8 +1474,8 @@ struct rte_intr_source {
        case RTE_INTR_EVENT_DEL:
                epfd_op = EPOLL_CTL_DEL;
                rev = rte_intr_elist_index_get(intr_handle, efd_idx);
-               if (__atomic_load_n(&rev->status,
-                               __ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
+               if (rte_atomic_load_explicit(&rev->status,
+                               rte_memory_order_relaxed) == RTE_EPOLL_INVALID) {
                        RTE_LOG(INFO, EAL, "Event does not exist.\n");
                        return -EPERM;
                }
@@ -1500,8 +1500,8 @@ struct rte_intr_source {
 
        for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++) {
                rev = rte_intr_elist_index_get(intr_handle, i);
-               if (__atomic_load_n(&rev->status,
-                               __ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
+               if (rte_atomic_load_explicit(&rev->status,
+                               rte_memory_order_relaxed) == RTE_EPOLL_INVALID)
                        continue;
                if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
                        /* force free if the entry valid */
diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h
index 7382412..645c713 100644
--- a/lib/eal/ppc/include/rte_atomic.h
+++ b/lib/eal/ppc/include/rte_atomic.h
@@ -48,7 +48,7 @@
 static inline int
 rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 {
-       return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,
+       return rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,
                rte_memory_order_acquire) ? 1 : 0;
 }
 
@@ -90,7 +90,7 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 static inline int
 rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 {
-       return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,
+       return rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,
                rte_memory_order_acquire) ? 1 : 0;
 }
 
@@ -132,7 +132,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 static inline int
 rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
 {
-       return __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,
+       return rte_atomic_compare_exchange_strong_explicit(dst, &exp, src, rte_memory_order_acquire,
                rte_memory_order_acquire) ? 1 : 0;
 }
 
diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c
index acf6484..145ac4b 100644
--- a/lib/eal/windows/rte_thread.c
+++ b/lib/eal/windows/rte_thread.c
@@ -9,6 +9,7 @@
 #include <rte_eal.h>
 #include <rte_common.h>
 #include <rte_errno.h>
+#include <rte_stdatomic.h>
 #include <rte_thread.h>
 
 #include "eal_windows.h"
@@ -19,7 +20,7 @@ struct eal_tls_key {
 
 struct thread_routine_ctx {
        rte_thread_func thread_func;
-       bool thread_init_failed;
+       RTE_ATOMIC(bool) thread_init_failed;
        void *routine_args;
 };
 
@@ -168,7 +169,8 @@ struct thread_routine_ctx {
 thread_func_wrapper(void *arg)
 {
        struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
-       const bool thread_exit = __atomic_load_n(&ctx.thread_init_failed, __ATOMIC_ACQUIRE);
+       const bool thread_exit = rte_atomic_load_explicit(
+               &ctx.thread_init_failed, rte_memory_order_acquire);
 
        free(arg);
 
@@ -237,7 +239,7 @@ struct thread_routine_ctx {
        }
 
 resume_thread:
-       __atomic_store_n(&ctx->thread_init_failed, thread_exit, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&ctx->thread_init_failed, thread_exit, rte_memory_order_release);
 
        if (ResumeThread(thread_handle) == (DWORD)-1) {
                ret = thread_log_last_error("ResumeThread()");
-- 
1.8.3.1

