Replace the use of GCC builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional RTE stdatomic API.
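
For illustration, a minimal standalone sketch of the conversion pattern
(the thread_run flag, the function names and the orderings below are
illustrative examples, not code taken from the driver):

    #include <stdbool.h>

    #include <rte_stdatomic.h>

    /* Flag shared between a control thread and a polling thread. */
    static RTE_ATOMIC(bool) thread_run;

    static void
    polling_start(void)
    {
            /* before: __atomic_store_n(&thread_run, true, __ATOMIC_RELEASE); */
            rte_atomic_store_explicit(&thread_run, true,
                                      rte_memory_order_release);
    }

    static bool
    polling_should_run(void)
    {
            /* before: return __atomic_load_n(&thread_run, __ATOMIC_ACQUIRE); */
            return rte_atomic_load_explicit(&thread_run,
                                            rte_memory_order_acquire);
    }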

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/net/sfc/meson.build       |  5 ++---
 drivers/net/sfc/sfc_mae_counter.c | 30 +++++++++++++++---------------
 drivers/net/sfc/sfc_repr_proxy.c  |  8 ++++----
 drivers/net/sfc/sfc_stats.h       |  8 ++++----
 4 files changed, 25 insertions(+), 26 deletions(-)

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 5adde68..d3603a0 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -47,9 +47,8 @@ int main(void)
     __int128 a = 0;
     __int128 b;
 
-    b = __atomic_load_n(&a, __ATOMIC_RELAXED);
-    __atomic_store(&b, &a, __ATOMIC_RELAXED);
-    __atomic_store_n(&b, a, __ATOMIC_RELAXED);
+    b = rte_atomic_load_explicit(&a, rte_memory_order_relaxed);
+    rte_atomic_store_explicit(&b, a, rte_memory_order_relaxed);
     return 0;
 }
 '''
diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
index ba17295..a32da84 100644
--- a/drivers/net/sfc/sfc_mae_counter.c
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -131,8 +131,8 @@
         * And it does not depend on different stores/loads in other threads.
         * Paired with relaxed ordering in counter increment.
         */
-       __atomic_store(&p->reset.pkts_bytes.int128,
-                      &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&p->reset.pkts_bytes.int128,
+                      p->value.pkts_bytes.int128, rte_memory_order_relaxed);
        p->generation_count = generation_count;
 
        p->ft_switch_hit_counter = counterp->ft_switch_hit_counter;
@@ -142,7 +142,7 @@
         * at the beginning of delete operation. Release ordering is
         * paired with acquire ordering on load in counter increment operation.
         */
-       __atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&p->inuse, true, rte_memory_order_release);
 
        sfc_info(sa, "enabled MAE counter 0x%x-#%u with reset pkts=%" PRIu64
                 " bytes=%" PRIu64, counterp->type, mae_counter.id,
@@ -189,7 +189,7 @@
         * paired with acquire ordering on load in counter increment operation.
         */
        p = &counters->mae_counters[mae_counter->id];
-       __atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&p->inuse, false, rte_memory_order_release);
 
        rc = efx_mae_counters_free_type(sa->nic, counter->type, 1, &unused,
                                        mae_counter, NULL);
@@ -228,7 +228,7 @@
         * Acquire ordering is paired with release ordering in counter add
         * and delete operations.
         */
-       __atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
+       inuse = rte_atomic_load_explicit(&p->inuse, rte_memory_order_acquire);
        if (!inuse) {
                /*
                 * Two possible cases include:
@@ -258,15 +258,15 @@
         * And it does not depend on different stores/loads in other threads.
         * Paired with relaxed ordering on counter reset.
         */
-       __atomic_store(&p->value.pkts_bytes,
-                      &cnt_val.pkts_bytes, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&p->value.pkts_bytes.int128,
+                      cnt_val.pkts_bytes.int128, rte_memory_order_relaxed);
 
        if (p->ft_switch_hit_counter != NULL) {
                uint64_t ft_switch_hit_counter;
 
                ft_switch_hit_counter = *p->ft_switch_hit_counter + pkts;
-               __atomic_store_n(p->ft_switch_hit_counter, ft_switch_hit_counter,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(p->ft_switch_hit_counter, ft_switch_hit_counter,
+                                rte_memory_order_relaxed);
        }
 
        sfc_info(sa, "update MAE counter 0x%x-#%u: pkts+%" PRIu64 "=%" PRIu64
@@ -498,8 +498,8 @@
                &sa->mae.counter_registry;
        int32_t rc;
 
-       while (__atomic_load_n(&counter_registry->polling.thread.run,
-                              __ATOMIC_ACQUIRE)) {
+       while (rte_atomic_load_explicit(&counter_registry->polling.thread.run,
+                              rte_memory_order_acquire)) {
                rc = sfc_mae_counter_poll_packets(sa);
                if (rc == 0) {
                        /*
@@ -684,8 +684,8 @@
        int rc;
 
        /* Ensure that flag is set before attempting to join thread */
-       __atomic_store_n(&counter_registry->polling.thread.run, false,
-                        __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&counter_registry->polling.thread.run, false,
+                        rte_memory_order_release);
 
        rc = rte_thread_join(counter_registry->polling.thread.id, NULL);
        if (rc != 0)
@@ -1024,8 +1024,8 @@
         * And it does not depend on different stores/loads in other threads.
         * Paired with relaxed ordering in counter increment.
         */
-       value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
-                                                 __ATOMIC_RELAXED);
+       value.pkts_bytes.int128 = rte_atomic_load_explicit(&p->value.pkts_bytes.int128,
+                                                 rte_memory_order_relaxed);
 
        data->hits_set = 1;
        data->hits = value.pkts - p->reset.pkts;
diff --git a/drivers/net/sfc/sfc_repr_proxy.c b/drivers/net/sfc/sfc_repr_proxy.c
index ff13795..7275901 100644
--- a/drivers/net/sfc/sfc_repr_proxy.c
+++ b/drivers/net/sfc/sfc_repr_proxy.c
@@ -83,7 +83,7 @@
         * Release ordering enforces marker set after data is populated.
         * Paired with acquire ordering in sfc_repr_proxy_mbox_handle().
         */
-       __atomic_store_n(&mbox->write_marker, true, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&mbox->write_marker, true, rte_memory_order_release);
 
        /*
         * Wait for the representor routine to process the request.
@@ -94,7 +94,7 @@
                 * Paired with release ordering in sfc_repr_proxy_mbox_handle()
                 * on acknowledge write.
                 */
-               if (__atomic_load_n(&mbox->ack, __ATOMIC_ACQUIRE))
+               if (rte_atomic_load_explicit(&mbox->ack, rte_memory_order_acquire))
                        break;
 
                rte_delay_ms(1);
@@ -119,7 +119,7 @@
         * Paired with release ordering in sfc_repr_proxy_mbox_send()
         * on marker set.
         */
-       if (!__atomic_load_n(&mbox->write_marker, __ATOMIC_ACQUIRE))
+       if (!rte_atomic_load_explicit(&mbox->write_marker, rte_memory_order_acquire))
                return;
 
        mbox->write_marker = false;
@@ -146,7 +146,7 @@
         * Paired with acquire ordering in sfc_repr_proxy_mbox_send()
         * on acknowledge read.
         */
-       __atomic_store_n(&mbox->ack, true, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&mbox->ack, true, rte_memory_order_release);
 }
 
 static void
diff --git a/drivers/net/sfc/sfc_stats.h b/drivers/net/sfc/sfc_stats.h
index 597e14d..25c2b9e 100644
--- a/drivers/net/sfc/sfc_stats.h
+++ b/drivers/net/sfc/sfc_stats.h
@@ -51,8 +51,8 @@
         * Store the result atomically to guarantee that the reader
         * core sees both counter updates together.
         */
-       __atomic_store_n(&st->pkts_bytes.int128, result.pkts_bytes.int128,
-                        __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&st->pkts_bytes.int128, result.pkts_bytes.int128,
+                        rte_memory_order_relaxed);
 #else
        st->pkts += pkts;
        st->bytes += bytes;
@@ -66,8 +66,8 @@
 sfc_pkts_bytes_get(const union sfc_pkts_bytes *st, union sfc_pkts_bytes *result)
 {
 #if SFC_SW_STATS_ATOMIC
-       result->pkts_bytes.int128 = __atomic_load_n(&st->pkts_bytes.int128,
-                                                   __ATOMIC_RELAXED);
+       result->pkts_bytes.int128 = rte_atomic_load_explicit(&st->pkts_bytes.int128,
+                                                   rte_memory_order_relaxed);
 #else
        *result = *st;
 #endif
-- 
1.8.3.1
