Adapt distributor for EAL optional atomics API changes
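
Replace the GCC __atomic builtins and __ATOMIC_* memory-order macros with
the EAL rte_atomic_*_explicit() wrappers and rte_memory_order_* values,
and qualify bufptr64 with __rte_atomic so it is a valid operand for those
APIs. In outline, the conversion applied in the hunks below is:

    __atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
        -> rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire)
    __atomic_store_n(&buf->bufptr64, req, __ATOMIC_RELEASE)
        -> rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release)

The conversion is intended to be mechanical, with no change to the
synchronization behaviour of the single-mode distributor.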

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/distributor/distributor_private.h    |  2 +-
 lib/distributor/rte_distributor_single.c | 44 ++++++++++++++++----------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h
index 7101f63..ffbdae5 100644
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -52,7 +52,7 @@
  * Only 64-bits of the memory is actually used though.
  */
 union rte_distributor_buffer_single {
-       volatile int64_t bufptr64;
+       volatile int64_t __rte_atomic bufptr64;
        char pad[RTE_CACHE_LINE_SIZE*3];
 } __rte_cache_aligned;
 
diff --git a/lib/distributor/rte_distributor_single.c b/lib/distributor/rte_distributor_single.c
index 2c77ac4..ad43c13 100644
--- a/lib/distributor/rte_distributor_single.c
+++ b/lib/distributor/rte_distributor_single.c
@@ -32,10 +32,10 @@
        int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_GET_BUF;
        RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-               ==, 0, __ATOMIC_RELAXED);
+               ==, 0, rte_memory_order_relaxed);
 
        /* Sync with distributor on GET_BUF flag. */
-       __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
 }
 
 struct rte_mbuf *
@@ -44,7 +44,7 @@ struct rte_mbuf *
 {
        union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
        /* Sync with distributor. Acquire bufptr64. */
-       if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
+       if (rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire)
                & RTE_DISTRIB_GET_BUF)
                return NULL;
 
@@ -72,10 +72,10 @@ struct rte_mbuf *
        uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
                        | RTE_DISTRIB_RETURN_BUF;
        RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-               ==, 0, __ATOMIC_RELAXED);
+               ==, 0, rte_memory_order_relaxed);
 
        /* Sync with distributor on RETURN_BUF flag. */
-       __atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
        return 0;
 }
 
@@ -119,7 +119,7 @@ struct rte_mbuf *
        d->in_flight_tags[wkr] = 0;
        d->in_flight_bitmask &= ~(1UL << wkr);
        /* Sync with worker. Release bufptr64. */
-       __atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, 0, rte_memory_order_release);
        if (unlikely(d->backlog[wkr].count != 0)) {
                /* On return of a packet, we need to move the
                 * queued packets for this core elsewhere.
@@ -165,21 +165,21 @@ struct rte_mbuf *
        for (wkr = 0; wkr < d->num_workers; wkr++) {
                uintptr_t oldbuf = 0;
                /* Sync with worker. Acquire bufptr64. */
-               const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-                                                       __ATOMIC_ACQUIRE);
+               const int64_t data = rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+                                                       rte_memory_order_acquire);
 
                if (data & RTE_DISTRIB_GET_BUF) {
                        flushed++;
                        if (d->backlog[wkr].count)
                                /* Sync with worker. Release bufptr64. */
-                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                               rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
                                        backlog_pop(&d->backlog[wkr]),
-                                       __ATOMIC_RELEASE);
+                                       rte_memory_order_release);
                        else {
                                /* Sync with worker on GET_BUF flag. */
-                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                               rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
                                        RTE_DISTRIB_GET_BUF,
-                                       __ATOMIC_RELEASE);
+                                       rte_memory_order_release);
                                d->in_flight_tags[wkr] = 0;
                                d->in_flight_bitmask &= ~(1UL << wkr);
                        }
@@ -217,8 +217,8 @@ struct rte_mbuf *
        while (next_idx < num_mbufs || next_mb != NULL) {
                uintptr_t oldbuf = 0;
                /* Sync with worker. Acquire bufptr64. */
-               int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-                                               __ATOMIC_ACQUIRE);
+               int64_t data = rte_atomic_load_explicit(&(d->bufs[wkr].bufptr64),
+                                               rte_memory_order_acquire);
 
                if (!next_mb) {
                        next_mb = mbufs[next_idx++];
@@ -264,15 +264,15 @@ struct rte_mbuf *
 
                        if (d->backlog[wkr].count)
                                /* Sync with worker. Release bufptr64. */
-                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                               rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
                                                backlog_pop(&d->backlog[wkr]),
-                                               __ATOMIC_RELEASE);
+                                               rte_memory_order_release);
 
                        else {
                                /* Sync with worker. Release bufptr64.  */
-                               __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                               rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
                                                next_value,
-                                               __ATOMIC_RELEASE);
+                                               rte_memory_order_release);
                                d->in_flight_tags[wkr] = new_tag;
                                d->in_flight_bitmask |= (1UL << wkr);
                                next_mb = NULL;
@@ -294,8 +294,8 @@ struct rte_mbuf *
        for (wkr = 0; wkr < d->num_workers; wkr++)
                if (d->backlog[wkr].count &&
                                /* Sync with worker. Acquire bufptr64. */
-                               (__atomic_load_n(&(d->bufs[wkr].bufptr64),
-                               __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+                               (rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+                               rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) {
 
                        int64_t oldbuf = d->bufs[wkr].bufptr64 >>
                                        RTE_DISTRIB_FLAG_BITS;
@@ -303,9 +303,9 @@ struct rte_mbuf *
                        store_return(oldbuf, d, &ret_start, &ret_count);
 
                        /* Sync with worker. Release bufptr64. */
-                       __atomic_store_n(&(d->bufs[wkr].bufptr64),
+                       rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
                                backlog_pop(&d->backlog[wkr]),
-                               __ATOMIC_RELEASE);
+                               rte_memory_order_release);
                }
 
        d->returns.start = ret_start;
-- 
1.8.3.1
