Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/timer/rte_timer.c | 50 +++++++++++++++++++++++++-------------------------
 lib/timer/rte_timer.h |  6 +++---
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/lib/timer/rte_timer.c b/lib/timer/rte_timer.c
index 85d6757..53ed221 100644
--- a/lib/timer/rte_timer.c
+++ b/lib/timer/rte_timer.c
@@ -210,7 +210,7 @@ struct rte_timer_data {
 
        status.state = RTE_TIMER_STOP;
        status.owner = RTE_TIMER_NO_OWNER;
-       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_relaxed);
 }
 
 /*
@@ -231,7 +231,7 @@ struct rte_timer_data {
 
        /* wait that the timer is in correct status before update,
         * and mark it as being configured */
-       prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
+       prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);
 
        while (success == 0) {
                /* timer is running on another core
@@ -254,11 +254,11 @@ struct rte_timer_data {
                 * timer is in CONFIG state, the state cannot be changed
                 * by other threads. So, we should use ACQUIRE here.
                 */
-               success = __atomic_compare_exchange_n(&tim->status.u32,
-                                             &prev_status.u32,
-                                             status.u32, 0,
-                                             __ATOMIC_ACQUIRE,
-                                             __ATOMIC_RELAXED);
+               success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
+                                             (uint32_t *)(uintptr_t)&prev_status.u32,
+                                             status.u32,
+                                             rte_memory_order_acquire,
+                                             rte_memory_order_relaxed);
        }
 
        ret_prev_status->u32 = prev_status.u32;
@@ -277,7 +277,7 @@ struct rte_timer_data {
 
        /* wait that the timer is in correct status before update,
         * and mark it as running */
-       prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
+       prev_status.u32 = rte_atomic_load_explicit(&tim->status.u32, rte_memory_order_relaxed);
 
        while (success == 0) {
                /* timer is not pending anymore */
@@ -293,11 +293,11 @@ struct rte_timer_data {
                 * timer is in RUNNING state, the state cannot be changed
                 * by other threads. So, we should use ACQUIRE here.
                 */
-               success = __atomic_compare_exchange_n(&tim->status.u32,
-                                             &prev_status.u32,
-                                             status.u32, 0,
-                                             __ATOMIC_ACQUIRE,
-                                             __ATOMIC_RELAXED);
+               success = rte_atomic_compare_exchange_strong_explicit(&tim->status.u32,
+                                             (uint32_t *)(uintptr_t)&prev_status.u32,
+                                             status.u32,
+                                             rte_memory_order_acquire,
+                                             rte_memory_order_relaxed);
        }
 
        return 0;
@@ -530,7 +530,7 @@ struct rte_timer_data {
        /* The "RELEASE" ordering guarantees the memory operations above
         * the status update are observed before the update by all threads
         */
-       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);
 
        if (tim_lcore != lcore_id || !local_is_locked)
                rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
@@ -612,7 +612,7 @@ struct rte_timer_data {
        /* The "RELEASE" ordering guarantees the memory operations above
         * the status update are observed before the update by all threads
         */
-       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&tim->status.u32, status.u32, rte_memory_order_release);
 
        return 0;
 }
@@ -646,8 +646,8 @@ struct rte_timer_data {
 int
 rte_timer_pending(struct rte_timer *tim)
 {
-       return __atomic_load_n(&tim->status.state,
-                               __ATOMIC_RELAXED) == RTE_TIMER_PENDING;
+       return rte_atomic_load_explicit(&tim->status.state,
+                               rte_memory_order_relaxed) == RTE_TIMER_PENDING;
 }
 
 /* must be called periodically, run all timer that expired */
@@ -753,8 +753,8 @@ struct rte_timer_data {
                         * operations above the status update are observed
                         * before the update by all threads
                         */
-                       __atomic_store_n(&tim->status.u32, status.u32,
-                               __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&tim->status.u32, status.u32,
+                               rte_memory_order_release);
                }
                else {
                        /* keep it in list and mark timer as pending */
@@ -766,8 +766,8 @@ struct rte_timer_data {
                         * operations above the status update are observed
                         * before the update by all threads
                         */
-                       __atomic_store_n(&tim->status.u32, status.u32,
-                               __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&tim->status.u32, status.u32,
+                               rte_memory_order_release);
                        __rte_timer_reset(tim, tim->expire + tim->period,
                                tim->period, lcore_id, tim->f, tim->arg, 1,
                                timer_data);
@@ -941,8 +941,8 @@ struct rte_timer_data {
                         * operations above the status update are observed
                         * before the update by all threads
                         */
-                       __atomic_store_n(&tim->status.u32, status.u32,
-                               __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&tim->status.u32, status.u32,
+                               rte_memory_order_release);
                } else {
                        /* keep it in list and mark timer as pending */
                        rte_spinlock_lock(
@@ -954,8 +954,8 @@ struct rte_timer_data {
                         * operations above the status update are observed
                         * before the update by all threads
                         */
-                       __atomic_store_n(&tim->status.u32, status.u32,
-                               __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&tim->status.u32, status.u32,
+                               rte_memory_order_release);
                        __rte_timer_reset(tim, tim->expire + tim->period,
                                tim->period, this_lcore, tim->f, tim->arg, 1,
                                data);
diff --git a/lib/timer/rte_timer.h b/lib/timer/rte_timer.h
index d3927d5..a35bc08 100644
--- a/lib/timer/rte_timer.h
+++ b/lib/timer/rte_timer.h
@@ -65,10 +65,10 @@ enum rte_timer_type {
  */
 union rte_timer_status {
        struct {
-               uint16_t state;  /**< Stop, pending, running, config. */
-               int16_t owner;   /**< The lcore that owns the timer. */
+               RTE_ATOMIC(uint16_t) state;  /**< Stop, pending, running, config. */
+               RTE_ATOMIC(int16_t) owner;   /**< The lcore that owns the timer. */
        };
-       uint32_t u32;            /**< To atomic-set status + owner. */
+       RTE_ATOMIC(uint32_t) u32;            /**< To atomic-set status + owner. */
 };
 
 #ifdef RTE_LIBRTE_TIMER_DEBUG
-- 
1.8.3.1

Reply via email to