Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/pdump/rte_pdump.c | 14 +++++++-------
 lib/pdump/rte_pdump.h |  8 ++++----
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/lib/pdump/rte_pdump.c b/lib/pdump/rte_pdump.c
index 53cca10..80b90c6 100644
--- a/lib/pdump/rte_pdump.c
+++ b/lib/pdump/rte_pdump.c
@@ -110,8 +110,8 @@ struct pdump_response {
                 * then packet doesn't match the filter (will be ignored).
                 */
                if (cbs->filter && rcs[i] == 0) {
-                       __atomic_fetch_add(&stats->filtered,
-                                          1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&stats->filtered,
+                                          1, rte_memory_order_relaxed);
                        continue;
                }
 
@@ -127,18 +127,18 @@ struct pdump_response {
                        p = rte_pktmbuf_copy(pkts[i], mp, 0, cbs->snaplen);
 
                if (unlikely(p == NULL))
-                       __atomic_fetch_add(&stats->nombuf, 1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&stats->nombuf, 1, rte_memory_order_relaxed);
                else
                        dup_bufs[d_pkts++] = p;
        }
 
-       __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&stats->accepted, d_pkts, rte_memory_order_relaxed);
 
        ring_enq = rte_ring_enqueue_burst(ring, (void *)&dup_bufs[0], d_pkts, NULL);
        if (unlikely(ring_enq < d_pkts)) {
                unsigned int drops = d_pkts - ring_enq;
 
-               __atomic_fetch_add(&stats->ringfull, drops, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&stats->ringfull, drops, rte_memory_order_relaxed);
                rte_pktmbuf_free_bulk(&dup_bufs[ring_enq], drops);
        }
 }
@@ -720,10 +720,10 @@ struct pdump_response {
        uint16_t qid;
 
        for (qid = 0; qid < nq; qid++) {
-               const uint64_t *perq = (const uint64_t *)&stats[port][qid];
+               const RTE_ATOMIC(uint64_t) *perq = (const uint64_t __rte_atomic *)&stats[port][qid];
 
                for (i = 0; i < sizeof(*total) / sizeof(uint64_t); i++) {
-                       val = __atomic_load_n(&perq[i], __ATOMIC_RELAXED);
+                       val = rte_atomic_load_explicit(&perq[i], rte_memory_order_relaxed);
                        sum[i] += val;
                }
        }
diff --git a/lib/pdump/rte_pdump.h b/lib/pdump/rte_pdump.h
index b1a3918..7feb2b6 100644
--- a/lib/pdump/rte_pdump.h
+++ b/lib/pdump/rte_pdump.h
@@ -233,10 +233,10 @@ enum {
  * The statistics are sum of both receive and transmit queues.
  */
 struct rte_pdump_stats {
-       uint64_t accepted; /**< Number of packets accepted by filter. */
-       uint64_t filtered; /**< Number of packets rejected by filter. */
-       uint64_t nombuf;   /**< Number of mbuf allocation failures. */
-       uint64_t ringfull; /**< Number of missed packets due to ring full. */
+       RTE_ATOMIC(uint64_t) accepted; /**< Number of packets accepted by filter. */
+       RTE_ATOMIC(uint64_t) filtered; /**< Number of packets rejected by filter. */
+       RTE_ATOMIC(uint64_t) nombuf;   /**< Number of mbuf allocation failures. */
+       RTE_ATOMIC(uint64_t) ringfull; /**< Number of missed packets due to ring full. */
 
        uint64_t reserved[4]; /**< Reserved and pad to cache line */
 };
-- 
1.8.3.1

Reply via email to