Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/net/memif/memif.h         |  4 ++--
 drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
 2 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index cb72c69..ccaa218 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
        uint32_t cookie;                        /**< MEMIF_COOKIE */
        uint16_t flags;                         /**< flags */
 #define MEMIF_RING_FLAG_MASK_INT 1             /**< disable interrupt mode */
-       uint16_t head;                  /**< pointer to ring buffer head */
+       RTE_ATOMIC(uint16_t) head;                      /**< pointer to ring buffer head */
        MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
-       uint16_t tail;                  /**< pointer to ring buffer tail */
+       RTE_ATOMIC(uint16_t) tail;                      /**< pointer to ring buffer tail */
        MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
        memif_desc_t desc[0];                   /**< buffer descriptors */
 } memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
         * threads, so using load-acquire pairs with store-release
         * in function eth_memif_rx for C2S queues.
         */
-       cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+       cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
        while (mq->last_tail != cur_tail) {
                RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
                rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
 
        if (type == MEMIF_RING_C2S) {
                cur_slot = mq->last_head;
-               last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+               last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
        } else {
                cur_slot = mq->last_tail;
-               last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+               last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
        }
 
        if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
 
 no_free_bufs:
        if (type == MEMIF_RING_C2S) {
-               __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
                mq->last_head = cur_slot;
        } else {
                mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
                 * is called in the context of receiver thread. The loads in
                 * the receiver do not need to synchronize with its own stores.
                 */
-               head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+               head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
                n_slots = ring_size - head + mq->last_tail;
 
                while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
                        d0 = &ring->desc[s0];
                        d0->length = pmd->run.pkt_buffer_size;
                }
-               __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
        }
 
        mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
         * threads, so using load-acquire pairs with store-release
         * to synchronize it between threads.
         */
-       last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+       last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
        if (cur_slot == last_slot)
                goto refill;
        n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
         * is called in the context of receiver thread. The loads in
         * the receiver do not need to synchronize with its own stores.
         */
-       head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+       head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
        n_slots = ring_size - head + mq->last_tail;
 
        if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
         * threads, so using store-release pairs with load-acquire
         * in function eth_memif_tx.
         */
-       __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
 
        mq->n_pkts += n_rx_pkts;
 
@@ -668,9 +668,9 @@ struct mp_region_msg {
                 * its own stores. Hence, the following load can be a
                 * relaxed load.
                 */
-               slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+               slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
                n_free = ring_size - slot +
-                               __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+                               rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
        } else {
                /* For S2C queues ring->tail is updated by the sender and
                 * this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
                 * its own stores. Hence, the following load can be a
                 * relaxed load.
                 */
-               slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
-               n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+               slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+               n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
        }
 
        uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
 
 no_free_slots:
        if (type == MEMIF_RING_C2S)
-               __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
        else
-               __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
 
        if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
            (rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
         * its own stores. Hence, the following load can be a
         * relaxed load.
         */
-       slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+       slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
        n_free = ring_size - slot + mq->last_tail;
 
        int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
         * threads, so using store-release pairs with load-acquire
         * in function eth_memif_rx for C2S rings.
         */
-       __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
 
        /* Send interrupt, if enabled. */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
 
        for (i = 0; i < pmd->run.num_c2s_rings; i++) {
                ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
-               __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
-               __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+               rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
                ring->cookie = MEMIF_COOKIE;
                ring->flags = 0;
 
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
 
        for (i = 0; i < pmd->run.num_s2c_rings; i++) {
                ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
-               __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
-               __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+               rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
                ring->cookie = MEMIF_COOKIE;
                ring->flags = 0;
 
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
                                MIF_LOG(ERR, "Wrong ring");
                                return -1;
                        }
-                       __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
-                       __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+                       rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
                        mq->last_head = 0;
                        mq->last_tail = 0;
                        /* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
                                MIF_LOG(ERR, "Wrong ring");
                                return -1;
                        }
-                       __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
-                       __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+                       rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
                        mq->last_head = 0;
                        mq->last_tail = 0;
                        /* enable polling mode */
-- 
1.8.3.1

Reply via email to