Replace the use of GCC builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
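For reviewers, a minimal sketch of the conversion pattern applied throughout
this series (not part of the patch). The variable and helper names below are
illustrative only; the sketch assumes <rte_stdatomic.h> provides RTE_ATOMIC()
and the rte_atomic_*_explicit()/rte_memory_order_* wrappers used here.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Variables accessed atomically are now annotated with RTE_ATOMIC(). */
static RTE_ATOMIC(uint16_t) example_state;

static inline void
example_release_store(void)
{
	/* Before: __atomic_store_n(&example_state, 1, __ATOMIC_RELEASE); */
	rte_atomic_store_explicit(&example_state, 1, rte_memory_order_release);
}

static inline int
example_relaxed_cas(void)
{
	uint16_t expected = 0;

	/*
	 * Before: __atomic_compare_exchange_n(&example_state, &expected, 1, 0,
	 *		__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	 * The weak/strong choice moves into the function name, so the boolean
	 * "weak" argument is dropped.
	 */
	return rte_atomic_compare_exchange_strong_explicit(&example_state,
			&expected, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}
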
 drivers/event/cnxk/cnxk_tim_worker.h   |  4 +--
 lib/eventdev/rte_event_timer_adapter.c | 66 +++++++++++++++++-----------------
 lib/eventdev/rte_event_timer_adapter.h |  2 +-
 3 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index f0857f2..f530d8c 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -314,7 +314,7 @@
 
        tim->impl_opaque[0] = (uintptr_t)chunk;
        tim->impl_opaque[1] = (uintptr_t)bkt;
-       __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&tim->state, RTE_EVENT_TIMER_ARMED, rte_memory_order_release);
        cnxk_tim_bkt_inc_nent(bkt);
        cnxk_tim_bkt_dec_lock_relaxed(bkt);
 
@@ -425,7 +425,7 @@
 
        tim->impl_opaque[0] = (uintptr_t)chunk;
        tim->impl_opaque[1] = (uintptr_t)bkt;
-       __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&tim->state, RTE_EVENT_TIMER_ARMED, rte_memory_order_release);
        cnxk_tim_bkt_inc_nent(bkt);
        cnxk_tim_bkt_dec_lock_relaxed(bkt);
 
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 427c4c6..2746670 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -630,12 +630,12 @@ struct swtim {
        uint32_t timer_data_id;
        /* Track which cores have actually armed a timer */
        struct {
-               uint16_t v;
+               RTE_ATOMIC(uint16_t) v;
        } __rte_cache_aligned in_use[RTE_MAX_LCORE];
        /* Track which cores' timer lists should be polled */
-       unsigned int poll_lcores[RTE_MAX_LCORE];
+       RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
        /* The number of lists that should be polled */
-       int n_poll_lcores;
+       RTE_ATOMIC(int) n_poll_lcores;
        /* Timers which have expired and can be returned to a mempool */
        struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
        /* The number of timers that can be returned to a mempool */
@@ -669,10 +669,10 @@ struct swtim {
 
        if (unlikely(sw->in_use[lcore].v == 0)) {
                sw->in_use[lcore].v = 1;
-               n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
-                                            __ATOMIC_RELAXED);
-               __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
-                               __ATOMIC_RELAXED);
+               n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
+                                            rte_memory_order_relaxed);
+               rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore,
+                               rte_memory_order_relaxed);
        }
 
        ret = event_buffer_add(&sw->buffer, &evtim->ev);
@@ -719,8 +719,8 @@ struct swtim {
                sw->stats.evtim_exp_count++;
 
                if (type == SINGLE)
-                       __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
-                               __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
+                               rte_memory_order_release);
        }
 
        if (event_buffer_batch_ready(&sw->buffer)) {
@@ -846,7 +846,7 @@ struct swtim {
 
        if (swtim_did_tick(sw)) {
                rte_timer_alt_manage(sw->timer_data_id,
-                                    sw->poll_lcores,
+                                    (unsigned int *)(uintptr_t)sw->poll_lcores,
                                     sw->n_poll_lcores,
                                     swtim_callback);
 
@@ -1027,7 +1027,7 @@ struct swtim {
 
        /* Free outstanding timers */
        rte_timer_stop_all(sw->timer_data_id,
-                          sw->poll_lcores,
+                          (unsigned int *)(uintptr_t)sw->poll_lcores,
                           sw->n_poll_lcores,
                           swtim_free_tim,
                           sw);
@@ -1142,7 +1142,7 @@ struct swtim {
        uint64_t cur_cycles;
 
        /* Check that timer is armed */
-       n_state = __atomic_load_n(&evtim->state, __ATOMIC_ACQUIRE);
+       n_state = rte_atomic_load_explicit(&evtim->state, rte_memory_order_acquire);
        if (n_state != RTE_EVENT_TIMER_ARMED)
                return -EINVAL;
 
@@ -1201,15 +1201,15 @@ struct swtim {
         * The atomic compare-and-swap operation can prevent the race condition
         * on in_use flag between multiple non-EAL threads.
         */
-       if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
-                       &exp_state, 1, 0,
-                       __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
+       if (unlikely(rte_atomic_compare_exchange_strong_explicit(&sw->in_use[lcore_id].v,
+                       &exp_state, 1,
+                       rte_memory_order_relaxed, rte_memory_order_relaxed))) {
                EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
                              lcore_id);
-               n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
-                                            __ATOMIC_RELAXED);
-               __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
-                               __ATOMIC_RELAXED);
+               n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
+                                            rte_memory_order_relaxed);
+               rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore_id,
+                               rte_memory_order_relaxed);
        }
 
        ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
@@ -1223,7 +1223,7 @@ struct swtim {
        type = get_timer_type(adapter);
 
        for (i = 0; i < nb_evtims; i++) {
-               n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
+               n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
                if (n_state == RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EALREADY;
                        break;
@@ -1235,9 +1235,9 @@ struct swtim {
 
                if (unlikely(check_destination_event_queue(evtims[i],
                                                           adapter) < 0)) {
-                       __atomic_store_n(&evtims[i]->state,
+                       rte_atomic_store_explicit(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                        rte_errno = EINVAL;
                        break;
                }
@@ -1250,15 +1250,15 @@ struct swtim {
 
                ret = get_timeout_cycles(evtims[i], adapter, &cycles);
                if (unlikely(ret == -1)) {
-                       __atomic_store_n(&evtims[i]->state,
+                       rte_atomic_store_explicit(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOLATE,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                        rte_errno = EINVAL;
                        break;
                } else if (unlikely(ret == -2)) {
-                       __atomic_store_n(&evtims[i]->state,
+                       rte_atomic_store_explicit(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOEARLY,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                        rte_errno = EINVAL;
                        break;
                }
@@ -1267,9 +1267,9 @@ struct swtim {
                                          type, lcore_id, NULL, evtims[i]);
                if (ret < 0) {
                        /* tim was in RUNNING or CONFIG state */
-                       __atomic_store_n(&evtims[i]->state,
+                       rte_atomic_store_explicit(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
-                                       __ATOMIC_RELEASE);
+                                       rte_memory_order_release);
                        break;
                }
 
@@ -1277,8 +1277,8 @@ struct swtim {
                /* RELEASE ordering guarantees the adapter specific value
                 * changes observed before the update of state.
                 */
-               __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
-                               __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
+                               rte_memory_order_release);
        }
 
        if (i < nb_evtims)
@@ -1320,7 +1320,7 @@ struct swtim {
                /* ACQUIRE ordering guarantees the access of implementation
                 * specific opaque data under the correct state.
                 */
-               n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
+               n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
                if (n_state == RTE_EVENT_TIMER_CANCELED) {
                        rte_errno = EALREADY;
                        break;
@@ -1346,8 +1346,8 @@ struct swtim {
                 * to make sure the state update data observed between
                 * threads.
                 */
-               __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
-                               __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
+                               rte_memory_order_release);
        }
 
        return i;
diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index fbdddf8..49e646a 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -498,7 +498,7 @@ struct rte_event_timer {
         * implementation specific values to share between the arm and cancel
         * operations.  The application should not modify this field.
         */
-       enum rte_event_timer_state state;
+       RTE_ATOMIC(enum rte_event_timer_state) state;
        /**< State of the event timer. */
        uint8_t user_meta[];
        /**< Memory to store user specific metadata.
-- 
1.8.3.1
