Replace the use of GCC builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
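
The general shape of the conversion, as a rough illustrative sketch only
(the struct and field names below are hypothetical and not taken from this
patch): fields that are only ever accessed atomically gain the RTE_ATOMIC()
qualifier, while ring fields shared with the guest (e.g. vq->used->idx)
keep their existing types and are cast to an __rte_atomic-qualified pointer
at the call site instead.

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* hypothetical example structure, not part of this patch */
    struct example_stats {
            RTE_ATOMIC(uint64_t) packets;
    };

    static inline void
    example_count(struct example_stats *s)
    {
            /* was: __atomic_fetch_add(&s->packets, 1, __ATOMIC_RELAXED); */
            rte_atomic_fetch_add_explicit(&s->packets, 1,
                    rte_memory_order_relaxed);
    }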

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/vhost/vdpa.c            |  3 ++-
 lib/vhost/vhost.c           | 42 ++++++++++++++++----------------
 lib/vhost/vhost.h           | 39 ++++++++++++++++--------------
 lib/vhost/vhost_user.c      |  6 ++---
 lib/vhost/virtio_net.c      | 58 +++++++++++++++++++++++++--------------------
 lib/vhost/virtio_net_ctrl.c |  6 +++--
 6 files changed, 84 insertions(+), 70 deletions(-)

diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index 6284ea2..219eef8 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -235,7 +235,8 @@ struct rte_vdpa_device *
        }
 
        /* used idx is the synchronization point for the split vring */
-       __atomic_store_n(&vq->used->idx, idx_m, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit((unsigned short __rte_atomic *)&vq->used->idx,
+               idx_m, rte_memory_order_release);
 
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                vring_used_event(s_vring) = idx_m;
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 7fde412..bdcf85b 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -128,12 +128,13 @@ struct vhost_vq_stats_name_off {
 {
 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
        /*
-        * __sync_ built-ins are deprecated, but __atomic_ ones
+        * __sync_ built-ins are deprecated, but rte_atomic_ ones
         * are sub-optimized in older GCC versions.
         */
        __sync_fetch_and_or_1(addr, (1U << nr));
 #else
-       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+       rte_atomic_fetch_or_explicit((volatile uint8_t __rte_atomic *)addr, (1U << nr),
+               rte_memory_order_relaxed);
 #endif
 }
 
@@ -155,7 +156,7 @@ struct vhost_vq_stats_name_off {
                return;
 
        /* To make sure guest memory updates are committed before logging */
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        page = addr / VHOST_LOG_PAGE;
        while (page * VHOST_LOG_PAGE < addr + len) {
@@ -197,7 +198,7 @@ struct vhost_vq_stats_name_off {
        if (unlikely(!vq->log_cache))
                return;
 
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        log_base = (unsigned long *)(uintptr_t)dev->log_base;
 
@@ -206,17 +207,18 @@ struct vhost_vq_stats_name_off {
 
 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
                /*
-                * '__sync' builtins are deprecated, but '__atomic' ones
+                * '__sync' builtins are deprecated, but 'rte_atomic' ones
                 * are sub-optimized in older GCC versions.
                 */
                __sync_fetch_and_or(log_base + elem->offset, elem->val);
 #else
-               __atomic_fetch_or(log_base + elem->offset, elem->val,
-                               __ATOMIC_RELAXED);
+               rte_atomic_fetch_or_explicit(
+                       (unsigned long __rte_atomic *)(log_base + elem->offset),
+                       elem->val, rte_memory_order_relaxed);
 #endif
        }
 
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        vq->log_cache_nb_elem = 0;
 }
@@ -231,7 +233,7 @@ struct vhost_vq_stats_name_off {
 
        if (unlikely(!vq->log_cache)) {
                /* No logging cache allocated, write dirty log map directly */
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
                vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
 
                return;
@@ -251,7 +253,7 @@ struct vhost_vq_stats_name_off {
                 * No more room for a new log cache entry,
                 * so write the dirty log map directly.
                 */
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
                vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
 
                return;
@@ -1184,11 +1186,11 @@ struct vhost_vq_stats_name_off {
        if (unlikely(idx >= vq->size))
                return -1;
 
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        vq->inflight_split->desc[idx].inflight = 0;
 
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        vq->inflight_split->used_idx = last_used_idx;
        return 0;
@@ -1227,11 +1229,11 @@ struct vhost_vq_stats_name_off {
        if (unlikely(head >= vq->size))
                return -1;
 
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        inflight_info->desc[head].inflight = 0;
 
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        inflight_info->old_free_head = inflight_info->free_head;
        inflight_info->old_used_idx = inflight_info->used_idx;
@@ -1454,7 +1456,7 @@ struct vhost_vq_stats_name_off {
                        vq->avail_wrap_counter << 15;
        }
 
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        vq->device_event->flags = flags;
        return 0;
@@ -1519,16 +1521,16 @@ struct vhost_vq_stats_name_off {
 
        rte_rwlock_read_lock(&vq->access_lock);
 
-       __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&vq->irq_pending, false, rte_memory_order_release);
 
        if (dev->backend_ops->inject_irq(dev, vq)) {
                if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
-                       __atomic_fetch_add(&vq->stats.guest_notifications_error,
-                                       1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,
+                                       1, rte_memory_order_relaxed);
        } else {
                if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
-                       __atomic_fetch_add(&vq->stats.guest_notifications,
-                                       1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,
+                                       1, rte_memory_order_relaxed);
                if (dev->notify_ops->guest_notified)
                        dev->notify_ops->guest_notified(dev->vid);
        }
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 5fc9035..f8624fb 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -158,9 +158,9 @@ struct virtqueue_stats {
        uint64_t inflight_completed;
        uint64_t guest_notifications_suppressed;
        /* Counters below are atomic, and should be incremented as such. */
-       uint64_t guest_notifications;
-       uint64_t guest_notifications_offloaded;
-       uint64_t guest_notifications_error;
+       RTE_ATOMIC(uint64_t) guest_notifications;
+       RTE_ATOMIC(uint64_t) guest_notifications_offloaded;
+       RTE_ATOMIC(uint64_t) guest_notifications_error;
 };
 
 /**
@@ -348,7 +348,7 @@ struct vhost_virtqueue {
        struct vhost_vring_addr ring_addrs;
        struct virtqueue_stats  stats;
 
-       bool irq_pending;
+       RTE_ATOMIC(bool) irq_pending;
 } __rte_cache_aligned;
 
 /* Virtio device status as per Virtio specification */
@@ -486,7 +486,7 @@ struct virtio_net {
        uint32_t                flags;
        uint16_t                vhost_hlen;
        /* to tell if we need broadcast rarp packet */
-       int16_t                 broadcast_rarp;
+       RTE_ATOMIC(int16_t)     broadcast_rarp;
        uint32_t                nr_vring;
        int                     async_copy;
 
@@ -557,7 +557,8 @@ struct virtio_net {
 static inline bool
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+       uint16_t flags = rte_atomic_load_explicit((unsigned short __rte_atomic *)&desc->flags,
+               rte_memory_order_acquire);
 
        return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_DESC_F_USED);
@@ -914,17 +915,19 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
        bool expected = false;
 
        if (dev->notify_ops->guest_notify) {
-               if (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0,
-                                 __ATOMIC_RELEASE, __ATOMIC_RELAXED)) {
+               if (rte_atomic_compare_exchange_strong_explicit(&vq->irq_pending, &expected, true,
+                                 rte_memory_order_release, rte_memory_order_relaxed)) {
                        if (dev->notify_ops->guest_notify(dev->vid, vq->index)) {
                                if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
-                                       __atomic_fetch_add(&vq->stats.guest_notifications_offloaded,
-                                               1, __ATOMIC_RELAXED);
+                                       rte_atomic_fetch_add_explicit(
+                                               &vq->stats.guest_notifications_offloaded,
+                                               1, rte_memory_order_relaxed);
                                return;
                        }
 
                        /* Offloading failed, fallback to direct IRQ injection */
-                       __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&vq->irq_pending, false,
+                               rte_memory_order_release);
                } else {
                        vq->stats.guest_notifications_suppressed++;
                        return;
@@ -933,14 +936,14 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        if (dev->backend_ops->inject_irq(dev, vq)) {
                if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
-                       __atomic_fetch_add(&vq->stats.guest_notifications_error,
-                               1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications_error,
+                               1, rte_memory_order_relaxed);
                return;
        }
 
        if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
-               __atomic_fetch_add(&vq->stats.guest_notifications,
-                       1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&vq->stats.guest_notifications,
+                       1, rte_memory_order_relaxed);
        if (dev->notify_ops->guest_notified)
                dev->notify_ops->guest_notified(dev->vid);
 }
@@ -949,7 +952,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
        /* Flush used->idx update before we read avail->flags. */
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        /* Don't kick guest if we don't reach index specified by guest. */
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
@@ -981,7 +984,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
        bool signalled_used_valid, kick = false;
 
        /* Flush used desc update. */
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 
        if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
                if (vq->driver_event->flags !=
@@ -1007,7 +1010,7 @@ uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
                goto kick;
        }
 
-       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       rte_atomic_thread_fence(rte_memory_order_acquire);
 
        off_wrap = vq->driver_event->off_wrap;
        off = off_wrap & ~(1 << 15);
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 901a80b..e363121 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1914,7 +1914,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev,
 
        if (inflight_split->used_idx != used->idx) {
                inflight_split->desc[last_io].inflight = 0;
-               rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+               rte_atomic_thread_fence(rte_memory_order_seq_cst);
                inflight_split->used_idx = used->idx;
        }
 
@@ -2418,10 +2418,10 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev,
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
-        * __ATOMIC_RELEASE ordering is for making sure the mac is
+        * rte_memory_order_release ordering is for making sure the mac is
         * copied before the flag is set.
         */
-       __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&dev->broadcast_rarp, 1, rte_memory_order_release);
        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 759a78e..8af20f1 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -298,8 +298,8 @@
 
        vhost_log_cache_sync(dev, vq);
 
-       __atomic_fetch_add(&vq->used->idx, vq->shadow_used_idx,
-                          __ATOMIC_RELEASE);
+       rte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,
+               vq->shadow_used_idx, rte_memory_order_release);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
@@ -335,7 +335,7 @@
        }
 
        /* The ordering for storing desc flags needs to be enforced. */
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;
@@ -387,8 +387,9 @@
 
        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
        /* desc flags is the synchronization point for virtio packed vring */
-       __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
-                        used_elem->flags, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(
+               (unsigned short __rte_atomic *)&vq->desc_packed[vq->shadow_last_used_idx].flags,
+               used_elem->flags, rte_memory_order_release);
 
        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
@@ -418,7 +419,7 @@
                desc_base[i].len = lens[i];
        }
 
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                desc_base[i].flags = flags;
@@ -515,7 +516,7 @@
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }
 
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;
 
@@ -1415,7 +1416,8 @@
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+       avail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
+               rte_memory_order_acquire);
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -1806,7 +1808,8 @@
        /*
         * The ordering between avail index and desc reads need to be enforced.
         */
-       avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+       avail_head = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
+               rte_memory_order_acquire);
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -2222,7 +2225,7 @@
        }
 
        /* The ordering for storing desc flags needs to be enforced. */
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
 
        from = async->last_buffer_idx_packed;
 
@@ -2311,7 +2314,9 @@
                        vhost_vring_call_packed(dev, vq);
                } else {
                        write_back_completed_descs_split(vq, n_descs);
-                       __atomic_fetch_add(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
+                       rte_atomic_fetch_add_explicit(
+                               (unsigned short __rte_atomic *)&vq->used->idx,
+                               n_descs, rte_memory_order_release);
                        vhost_vring_call_split(dev, vq);
                }
        } else {
@@ -3085,8 +3090,8 @@
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       avail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
-                       vq->last_avail_idx;
+       avail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
+               rte_memory_order_acquire) - vq->last_avail_idx;
        if (avail_entries == 0)
                return 0;
 
@@ -3224,7 +3229,7 @@
                        return -1;
        }
 
-       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       rte_atomic_thread_fence(rte_memory_order_acquire);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;
@@ -3297,7 +3302,7 @@
                        return -1;
        }
 
-       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       rte_atomic_thread_fence(rte_memory_order_acquire);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;
@@ -3590,7 +3595,7 @@
         *
         * broadcast_rarp shares a cacheline in the virtio_net structure
         * with some fields that are accessed during enqueue and
-        * __atomic_compare_exchange_n causes a write if performed compare
+        * rte_atomic_compare_exchange_strong_explicit causes a write if performed compare
         * and exchange. This could result in false sharing between enqueue
         * and dequeue.
         *
@@ -3598,9 +3603,9 @@
         * and only performing compare and exchange if the read indicates it
         * is likely to be set.
         */
-       if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
-                       __atomic_compare_exchange_n(&dev->broadcast_rarp,
-                       &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
+       if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
+                       rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
+                       &success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
 
                rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (rarp_mbuf == NULL) {
@@ -3683,7 +3688,8 @@
                vhost_vring_call_packed(dev, vq);
        } else {
                write_back_completed_descs_split(vq, nr_cpl_pkts);
-               __atomic_fetch_add(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
+               rte_atomic_fetch_add_explicit((unsigned short __rte_atomic *)&vq->used->idx,
+                       nr_cpl_pkts, rte_memory_order_release);
                vhost_vring_call_split(dev, vq);
        }
        vq->async->pkts_inflight_n -= nr_cpl_pkts;
@@ -3714,8 +3720,8 @@
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       avail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
-                       vq->last_avail_idx;
+       avail_entries = rte_atomic_load_explicit((unsigned short __rte_atomic *)&vq->avail->idx,
+               rte_memory_order_acquire) - vq->last_avail_idx;
        if (avail_entries == 0)
                goto out;
 
@@ -4204,7 +4210,7 @@
         *
         * broadcast_rarp shares a cacheline in the virtio_net structure
         * with some fields that are accessed during enqueue and
-        * __atomic_compare_exchange_n causes a write if performed compare
+        * rte_atomic_compare_exchange_strong_explicit causes a write if performed compare
         * and exchange. This could result in false sharing between enqueue
         * and dequeue.
         *
@@ -4212,9 +4218,9 @@
         * and only performing compare and exchange if the read indicates it
         * is likely to be set.
         */
-       if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
-                       __atomic_compare_exchange_n(&dev->broadcast_rarp,
-                       &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
+       if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
+                       rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
+                       &success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
 
                rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (rarp_mbuf == NULL) {
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index 6b583a0..c4847f8 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -33,7 +33,8 @@ struct virtio_net_ctrl_elem {
        uint8_t *ctrl_req;
        struct vring_desc *descs;
 
-       avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
+       avail_idx = rte_atomic_load_explicit((unsigned short __rte_atomic *)&cvq->avail->idx,
+               rte_memory_order_acquire);
        if (avail_idx == cvq->last_avail_idx) {
                VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
                return 0;
@@ -236,7 +237,8 @@ struct virtio_net_ctrl_elem {
        if (cvq->last_used_idx >= cvq->size)
                cvq->last_used_idx -= cvq->size;
 
-       __atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit((unsigned short __rte_atomic *)&cvq->used->idx,
+               cvq->last_used_idx, rte_memory_order_release);
 
        vhost_vring_call_split(dev, dev->cvq);
 
-- 
1.8.3.1