Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API calls.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 examples/bbdev_app/main.c                          | 13 +++++----
 examples/l2fwd-event/l2fwd_common.h                |  4 +--
 examples/l2fwd-event/l2fwd_event.c                 | 24 ++++++++--------
 examples/l2fwd-jobstats/main.c                     | 11 ++++----
 .../client_server_mp/mp_server/main.c              |  6 ++--
 examples/server_node_efd/efd_server/main.c         |  6 ++--
 examples/vhost/main.c                              | 32 +++++++++++-----------
 examples/vhost/main.h                              |  4 +--
 examples/vhost/virtio_net.c                        | 13 +++++----
 examples/vhost_blk/vhost_blk.c                     |  8 +++---
 examples/vm_power_manager/channel_monitor.c        |  9 +++---
 11 files changed, 68 insertions(+), 62 deletions(-)

diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 16599ae..214fdf2 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
        .num_dec_cores = 1,
 };
 
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
 
 /* display usage */
 static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
 signal_handler(int signum)
 {
        printf("\nSignal %d received\n", signum);
-       __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
 }
 
 static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
        fflush(stdout);
 
        for (count = 0; count <= MAX_CHECK_TIME &&
-                       !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+                       !rte_atomic_load_explicit(&global_exit_flag,
+                           rte_memory_order_relaxed); count++) {
                memset(&link, 0, sizeof(link));
                link_get_err = rte_eth_link_get_nowait(port_id, &link);
 
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
 {
        struct stats_lcore_params *stats_lcore = arg;
 
-       while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+       while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
                print_stats(stats_lcore);
                rte_delay_ms(500);
        }
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
        const bool run_decoder = (lcore_conf->core_type &
                        (1 << RTE_BBDEV_OP_TURBO_DEC));
 
-       while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+       while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
                if (run_encoder)
                        run_encoding(lcore_conf);
                if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
                .align = alignof(struct rte_mbuf *),
        };
 
-       __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
 
        sigret = signal(SIGTERM, signal_handler);
        if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 07f84cb..3d2e303 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
 /* Per-port statistics struct */
 struct l2fwd_port_statistics {
        uint64_t dropped;
-       uint64_t tx;
-       uint64_t rx;
+       RTE_ATOMIC(uint64_t) tx;
+       RTE_ATOMIC(uint64_t) rx;
 } __rte_cache_aligned;
 
 /* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
        dst_port = rsrc->dst_ports[mbuf->port];
 
        if (timer_period > 0)
-               __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
-                               1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+                               1, rte_memory_order_relaxed);
        mbuf->port = dst_port;
 
        if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
                rte_event_eth_tx_adapter_txq_set(mbuf, 0);
 
        if (timer_period > 0)
-               __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
-                               1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+                               1, rte_memory_order_relaxed);
 }
 
 static __rte_always_inline void
@@ -367,8 +367,8 @@
                        vec->queue = 0;
 
                if (timer_period > 0)
-                       __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
-                                          vec->nb_elem, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+                                          vec->nb_elem, rte_memory_order_relaxed);
 
                for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
                        if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
                }
 
                if (timer_period > 0)
-                       __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
-                                          vec->nb_elem, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+                                          vec->nb_elem, rte_memory_order_relaxed);
        } else {
                for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
                        if (timer_period > 0)
-                               __atomic_fetch_add(
+                               rte_atomic_fetch_add_explicit(
                                        &rsrc->port_stats[mbufs[i]->port].rx, 1,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
 
                        if (j < vec->nb_elem)
                                rte_prefetch0(
@@ -406,9 +406,9 @@
                                rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
 
                        if (timer_period > 0)
-                               __atomic_fetch_add(
+                               rte_atomic_fetch_add_explicit(
                                        &rsrc->port_stats[mbufs[i]->port].tx, 1,
-                                       __ATOMIC_RELAXED);
+                                       rte_memory_order_relaxed);
                }
        }
 }
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 2653db4..9a094ef 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct lcore_queue_conf {
        struct rte_jobstats idle_job;
        struct rte_jobstats_context jobs_context;
 
-       uint16_t stats_read_pending;
+       RTE_ATOMIC(uint16_t) stats_read_pending;
        rte_spinlock_t lock;
 } __rte_cache_aligned;
 /* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct l2fwd_port_statistics {
        uint64_t collection_time = rte_get_timer_cycles();
 
        /* Ask forwarding thread to give us stats. */
-       __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
        rte_spinlock_lock(&qconf->lock);
-       __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
 
        /* Collect context statistics. */
        stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct l2fwd_port_statistics {
                                repeats++;
                                need_manage = qconf->flush_timer.expire < now;
                                /* Check if we was esked to give a stats. */
-                               stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
-                                               __ATOMIC_RELAXED);
+                               stats_read_pending = rte_atomic_load_explicit(
+                                       &qconf->stats_read_pending,
+                                       rte_memory_order_relaxed);
                                need_manage |= stats_read_pending;
 
                                for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
 sleep_lcore(__rte_unused void *dummy)
 {
        /* Used to pick a display thread - static, so zero-initialised */
-       static uint32_t display_stats;
+       static RTE_ATOMIC(uint32_t) display_stats;
 
        uint32_t status = 0;
        /* Only one core should display stats */
-       if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
-                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                const unsigned sleeptime = 1;
                printf("Core %u displaying statistics\n", rte_lcore_id());
 
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
 sleep_lcore(__rte_unused void *dummy)
 {
        /* Used to pick a display thread - static, so zero-initialised */
-       static uint32_t display_stats;
+       static RTE_ATOMIC(uint32_t) display_stats;
 
        /* Only one core should display stats */
        uint32_t display_init = 0;
-       if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
-                       __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+                       rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                const unsigned int sleeptime = 1;
 
                printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
        }
 
        if (enable_stats) {
-               __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
-                               __ATOMIC_SEQ_CST);
-               __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
-                               __ATOMIC_SEQ_CST);
+               rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+                               rte_memory_order_seq_cst);
+               rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+                               rte_memory_order_seq_cst);
                src_vdev->stats.tx_total++;
                src_vdev->stats.tx += ret;
        }
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
        ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
 
        if (enable_stats) {
-               __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
-                               __ATOMIC_SEQ_CST);
-               __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
-                               __ATOMIC_SEQ_CST);
+               rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+                               rte_memory_order_seq_cst);
+               rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+                               rte_memory_order_seq_cst);
        }
 
        if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
        }
 
        if (enable_stats) {
-               __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
-                               __ATOMIC_SEQ_CST);
-               __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
-                               __ATOMIC_SEQ_CST);
+               rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+                               rte_memory_order_seq_cst);
+               rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+                               rte_memory_order_seq_cst);
        }
 
        if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
                        tx         = vdev->stats.tx;
                        tx_dropped = tx_total - tx;
 
-                       rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
-                               __ATOMIC_SEQ_CST);
-                       rx         = __atomic_load_n(&vdev->stats.rx_atomic,
-                               __ATOMIC_SEQ_CST);
+                       rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+                               rte_memory_order_seq_cst);
+                       rx         = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+                               rte_memory_order_seq_cst);
                        rx_dropped = rx_total - rx;
 
                        printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 2fcb837..b163955 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
 struct device_statistics {
        uint64_t        tx;
        uint64_t        tx_total;
-       uint64_t        rx_atomic;
-       uint64_t        rx_total_atomic;
+       RTE_ATOMIC(uint64_t)    rx_atomic;
+       RTE_ATOMIC(uint64_t)    rx_total_atomic;
 };
 
 struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
        queue = &dev->queues[queue_id];
        vr    = &queue->vr;
 
-       avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+       avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+           rte_memory_order_acquire);
        start_idx = queue->last_used_idx;
        free_entries = avail_idx - start_idx;
        count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
                        rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
        }
 
-       __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+       rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+           rte_memory_order_release);
        queue->last_used_idx += count;
 
        rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
        queue = &dev->queues[queue_id];
        vr    = &queue->vr;
 
-       free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
-                       queue->last_avail_idx;
+       free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+           rte_memory_order_acquire) - queue->last_avail_idx;
        if (free_entries == 0)
                return 0;
 
@@ -442,7 +444,8 @@
        queue->last_avail_idx += i;
        queue->last_used_idx += i;
 
-       __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+       rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+           rte_memory_order_acq_rel);
 
        rte_vhost_vring_call(dev->vid, queue_id);
 
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
         */
        used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
        used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
        used->idx++;
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
                vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
        desc->id = task->buffer_id;
        desc->addr = 0;
 
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
        if (vq->used_wrap_counter)
                desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
        else
                desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
                                           task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
                return -1;
 
        uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
-       if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
-               CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0
+       if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+               CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+                   rte_memory_order_relaxed) == 0)
                return -1;
 
        if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
         * from management thread
         */
        uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
-       __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
-               CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+       rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+               CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
        return 0;
 
 }
-- 
1.8.3.1

Reply via email to