Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/net/virtio/virtio_ring.h                 |  4 +--
 drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
 drivers/net/virtio/virtqueue.h                   | 32 ++++++++++++------------
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
 
 struct vring_used {
        uint16_t flags;
-       uint16_t idx;
+       RTE_ATOMIC(uint16_t) idx;
        struct vring_used_elem ring[];
 };
 
@@ -70,7 +70,7 @@ struct vring_packed_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
-       uint16_t flags;
+       RTE_ATOMIC(uint16_t) flags;
 };
 
 #define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
 static inline int
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+       uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
 
        return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
                if (vq->used_wrap_counter)
                        flags |= VRING_PACKED_DESC_F_AVAIL_USED;
 
-               __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
-                                __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+                                rte_memory_order_release);
 
                vq->used_idx += n_descs;
                if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
        struct vring *vring = &dev->vrings.split[queue_idx];
 
        /* Consume avail ring, using used ring idx as first one */
-       while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+       while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
               != vring->avail->idx) {
-               avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+               avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
                            & (vring->num - 1);
                desc_idx = vring->avail->ring[avail_idx];
 
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
                uep->id = desc_idx;
                uep->len = n_descs;
 
-               __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
        }
 }
 
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 5d0c039..b7bbdde 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
 virtio_mb(uint8_t weak_barriers)
 {
        if (weak_barriers)
-               rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+               rte_atomic_thread_fence(rte_memory_order_seq_cst);
        else
                rte_mb();
 }
@@ -46,7 +46,7 @@
 virtio_rmb(uint8_t weak_barriers)
 {
        if (weak_barriers)
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
        else
                rte_io_rmb();
 }
@@ -55,7 +55,7 @@
 virtio_wmb(uint8_t weak_barriers)
 {
        if (weak_barriers)
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
        else
                rte_io_wmb();
 }
@@ -67,12 +67,12 @@
        uint16_t flags;
 
        if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
  * a better perf(~1.5%), which comes from the saved branch by the compiler.
  * The if and else branch are identical  on the platforms except Arm.
  */
 #ifdef RTE_ARCH_ARM
-               flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+               flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
 #else
                flags = dp->flags;
                rte_io_rmb();
@@ -90,12 +90,12 @@
                              uint16_t flags, uint8_t weak_barriers)
 {
        if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
  * a better perf(~1.5%), which comes from the saved branch by the compiler.
  * The if and else branch are identical on the platforms except Arm.
  */
 #ifdef RTE_ARCH_ARM
-               __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
 #else
                rte_io_wmb();
                dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
 
        if (vq->hw->weak_barriers) {
        /**
-        * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+        * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
         * reports a slightly better perf, which comes from the saved
         * branch by the compiler.
         * The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
                idx = vq->vq_split.ring.used->idx;
                rte_smp_rmb();
 #else
-               idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
-                               __ATOMIC_ACQUIRE);
+               idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+                               rte_memory_order_acquire);
 #endif
        } else {
                idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 vq_update_avail_idx(struct virtqueue *vq)
 {
        if (vq->hw->weak_barriers) {
-       /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+       /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
         * it reports a slightly better perf, which comes from the
         * saved branch by the compiler.
         * The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
                rte_smp_wmb();
                vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
 #else
-               __atomic_store_n(&vq->vq_split.ring.avail->idx,
-                                vq->vq_avail_idx, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+                                vq->vq_avail_idx, rte_memory_order_release);
 #endif
        } else {
                rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \
-       used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
-                                  __ATOMIC_RELAXED); \
+       used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+                                  rte_memory_order_relaxed); \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
        if (virtio_with_packed_queue((vq)->hw)) { \
                PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
          " avail.flags=0x%x; used.flags=0x%x", \
          (vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
          (vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
-         __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+         rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
          (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
 } while (0)
 #else
-- 
1.8.3.1

Reply via email to