From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Replace the GCC built-in __atomic_xxx intrinsics with the rte_atomic_xxx API.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
Depends-on: series-33602 ("event/cnxk: fix getwork write data on reconfig")

v2 Changes:
- Rebase and remove single dequeue and enqueue functions.
v3 Changes:
- Remove __atomic builtins.
v4 Changes:
- Rebase onto next-event tree.
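
Note for reviewers less familiar with the stdatomic-style DPDK API: below is a
minimal sketch of the conversion pattern applied throughout this series. The
variable and function names are illustrative only, not taken from the driver.

  #include <rte_stdatomic.h>

  /* Hypothetical example variable; atomic operands now carry the
   * __rte_atomic qualifier in their type.
   */
  static int64_t __rte_atomic space;

  static inline void
  conversion_pattern(int64_t expected, int64_t desired)
  {
          /* Before: __atomic_fetch_sub(&space, 1, __ATOMIC_ACQUIRE);
           * Memory ordering now uses the rte_memory_order_* constants.
           */
          rte_atomic_fetch_sub_explicit(&space, 1, rte_memory_order_acquire);

          /* Before: __atomic_compare_exchange(&space, &expected, &desired, 0,
           *                                   __ATOMIC_RELEASE, __ATOMIC_RELAXED);
           * Note the rte_ variant takes the desired value directly, not by
           * pointer, and drops the weak/strong flag in favor of distinct
           * strong/weak function names.
           */
          rte_atomic_compare_exchange_strong_explicit(&space, &expected, desired,
                                                      rte_memory_order_release,
                                                      rte_memory_order_relaxed);

          /* Before: rte_atomic_thread_fence(__ATOMIC_ACQ_REL); */
          rte_atomic_thread_fence(rte_memory_order_acq_rel);
  }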

 drivers/event/cnxk/cn10k_eventdev.c         |  6 +--
 drivers/event/cnxk/cn10k_eventdev.h         |  4 +-
 drivers/event/cnxk/cn10k_tx_worker.h        |  7 ++-
 drivers/event/cnxk/cn10k_worker.c           | 15 +++---
 drivers/event/cnxk/cn10k_worker.h           |  2 +-
 drivers/event/cnxk/cn9k_eventdev.c          |  8 +--
 drivers/event/cnxk/cn9k_worker.h            | 18 ++++---
 drivers/event/cnxk/cnxk_eventdev.h          |  4 +-
 drivers/event/cnxk/cnxk_eventdev_selftest.c | 60 ++++++++++-----------
 drivers/event/cnxk/cnxk_tim_evdev.c         |  4 +-
 drivers/event/cnxk/cnxk_tim_evdev.h         | 10 ++--
 drivers/event/cnxk/cnxk_tim_worker.c        | 10 ++--
 drivers/event/cnxk/cnxk_tim_worker.h        | 57 ++++++++++----------
 drivers/event/cnxk/cnxk_worker.h            |  3 +-
 drivers/net/cnxk/cn9k_ethdev.h              |  2 +-
 15 files changed, 108 insertions(+), 102 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 4edac33a84..4a2c88c8c6 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -94,9 +94,9 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
        uint64_t val;

        ws->grp_base = grp_base;
-       ws->fc_mem = (int64_t *)dev->fc_iova;
+       ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova;
        ws->xaq_lmt = dev->xaq_lmt;
-       ws->fc_cache_space = dev->fc_cache_space;
+       ws->fc_cache_space = (int64_t __rte_atomic *)dev->fc_cache_space;
        ws->aw_lmt = ws->lmt_base;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);

@@ -768,7 +768,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cn10k_sso_hws *ws = event_dev->data->ports[i];
                ws->xaq_lmt = dev->xaq_lmt;
-               ws->fc_mem = (int64_t *)dev->fc_iova;
+               ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova;
                ws->tstamp = dev->tstamp;
                if (lookup_mem)
                        ws->lookup_mem = lookup_mem;
diff --git a/drivers/event/cnxk/cn10k_eventdev.h b/drivers/event/cnxk/cn10k_eventdev.h
index 372121465c..b8395aa314 100644
--- a/drivers/event/cnxk/cn10k_eventdev.h
+++ b/drivers/event/cnxk/cn10k_eventdev.h
@@ -19,8 +19,8 @@ struct __rte_cache_aligned cn10k_sso_hws {
        struct cnxk_timesync_info **tstamp;
        uint64_t meta_aura;
        /* Add Work Fastpath data */
-       alignas(RTE_CACHE_LINE_SIZE) int64_t *fc_mem;
-       int64_t *fc_cache_space;
+       alignas(RTE_CACHE_LINE_SIZE) int64_t __rte_atomic *fc_mem;
+       int64_t __rte_atomic *fc_cache_space;
        uintptr_t aw_lmt;
        uintptr_t grp_base;
        int32_t xaq_lmt;
diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index 0695ea23e1..19cb2e22e5 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -51,7 +51,9 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
                     : "memory");
 #else
        do {
-               avail = txq->nb_sqb_bufs_adj - __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+               avail = txq->nb_sqb_bufs_adj -
+                       rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem,
+                                                rte_memory_order_relaxed);
        } while (((avail << txq->sqes_per_sqb_log2) - avail) <= 0);
 #endif
 }
@@ -60,7 +62,8 @@ static __rte_always_inline int32_t
 cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
 {
        int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
-                       (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+                       (int32_t)rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem,
+                                                         rte_memory_order_relaxed);
        return (avail << txq->sqes_per_sqb_log2) - avail;
 }

diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index c49138316c..06ad7437d5 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -16,7 +16,7 @@ cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

-       rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+       rte_atomic_thread_fence(rte_memory_order_acq_rel);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

@@ -80,7 +80,7 @@ cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
 static inline int32_t
 sso_read_xaq_space(struct cn10k_sso_hws *ws)
 {
-       return (ws->xaq_lmt - __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED)) *
+       return (ws->xaq_lmt - rte_atomic_load_explicit(ws->fc_mem, rte_memory_order_relaxed)) *
               ROC_SSO_XAE_PER_XAQ;
 }

@@ -90,19 +90,20 @@ sso_lmt_aw_wait_fc(struct cn10k_sso_hws *ws, int64_t req)
        int64_t cached, refill;

 retry:
-       while (__atomic_load_n(ws->fc_cache_space, __ATOMIC_RELAXED) < 0)
+       while (rte_atomic_load_explicit(ws->fc_cache_space, rte_memory_order_relaxed) < 0)
                ;

-       cached = __atomic_fetch_sub(ws->fc_cache_space, req, __ATOMIC_ACQUIRE) - req;
+       cached = rte_atomic_fetch_sub_explicit(ws->fc_cache_space, req, rte_memory_order_acquire) -
+                req;
        /* Check if there is enough space, else update and retry. */
        if (cached < 0) {
                /* Check if we have space else retry. */
                do {
                        refill = sso_read_xaq_space(ws);
                } while (refill <= 0);
-               __atomic_compare_exchange(ws->fc_cache_space, &cached, &refill,
-                                         0, __ATOMIC_RELEASE,
-                                         __ATOMIC_RELAXED);
+               rte_atomic_compare_exchange_strong_explicit(ws->fc_cache_space, &cached, refill,
+                                                           rte_memory_order_release,
+                                                           rte_memory_order_relaxed);
                goto retry;
        }
 }
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 5d3394508e..954dee5a2a 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -311,7 +311,7 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
                roc_load_pair(gw.u64[0], gw.u64[1],
                              ws->base + SSOW_LF_GWS_WQE0);
        } while (gw.u64[0] & BIT_ULL(63));
-       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+       rte_atomic_thread_fence(rte_memory_order_seq_cst);
 #endif
        ws->gw_rdata = gw.u64[0];
        if (gw.u64[1])
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index b176044aa5..05e237c005 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -74,7 +74,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
        if (dev->dual_ws) {
                dws = hws;
                dws->grp_base = grp_base;
-               dws->fc_mem = (uint64_t *)dev->fc_iova;
+               dws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
@@ -82,7 +82,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
        } else {
                ws = hws;
                ws->grp_base = grp_base;
-               ws->fc_mem = (uint64_t *)dev->fc_iova;
+               ws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
@@ -822,14 +822,14 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->xaq_lmt = dev->xaq_lmt;
-                       dws->fc_mem = (uint64_t *)dev->fc_iova;
+                       dws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova;
                        dws->tstamp = dev->tstamp;
                        if (lookup_mem)
                                dws->lookup_mem = lookup_mem;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->xaq_lmt = dev->xaq_lmt;
-                       ws->fc_mem = (uint64_t *)dev->fc_iova;
+                       ws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova;
                        ws->tstamp = dev->tstamp;
                        if (lookup_mem)
                                ws->lookup_mem = lookup_mem;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 064cdfe94a..71caf45574 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -28,7 +28,7 @@ cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

-       rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+       rte_atomic_thread_fence(rte_memory_order_acq_rel);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

@@ -71,7 +71,7 @@ cn9k_sso_hws_new_event_wait(struct cn9k_sso_hws *ws, const struct rte_event *ev)
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

-       while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED))
+       while (ws->xaq_lmt <= rte_atomic_load_explicit(ws->fc_mem, rte_memory_order_relaxed))
                ;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
@@ -93,7 +93,7 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
                 * Use add_work operation to transfer the event to
                 * new group/core
                 */
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
                roc_sso_hws_head_wait(ws->base);
                cn9k_sso_hws_new_event_wait(ws, ev);
        }
@@ -110,7 +110,7 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

-       rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
+       rte_atomic_thread_fence(rte_memory_order_acq_rel);
        if (dws->xaq_lmt <= *dws->fc_mem)
                return 0;

@@ -128,7 +128,7 @@ cn9k_sso_hws_dual_new_event_wait(struct cn9k_sso_hws_dual *dws,
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

-       while (dws->xaq_lmt <= __atomic_load_n(dws->fc_mem, __ATOMIC_RELAXED))
+       while (dws->xaq_lmt <= rte_atomic_load_explicit(dws->fc_mem, rte_memory_order_relaxed))
                ;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
@@ -151,7 +151,7 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
                 * Use add_work operation to transfer the event to
                 * new group/core
                 */
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
                roc_sso_hws_head_wait(base);
                cn9k_sso_hws_dual_new_event_wait(dws, ev);
        }
@@ -571,7 +571,9 @@ cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
                     : "memory");
 #else
        do {
-               avail = txq->nb_sqb_bufs_adj - __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+               avail = txq->nb_sqb_bufs_adj -
+                       rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem,
+                                                rte_memory_order_relaxed);
        } while (((avail << txq->sqes_per_sqb_log2) - avail) <= 0);
 #endif
 }
@@ -740,7 +742,7 @@ static __rte_always_inline int32_t
 cn9k_sso_sq_depth(const struct cn9k_eth_txq *txq)
 {
        int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
-                       (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+                       (int32_t)rte_atomic_load_explicit(txq->fc_mem, rte_memory_order_relaxed);
        return (avail << txq->sqes_per_sqb_log2) - avail;
 }

diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index f147ef3c78..982bbb6a9b 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -136,7 +136,7 @@ struct __rte_cache_aligned cn9k_sso_hws {
        struct cnxk_timesync_info **tstamp;
        /* Add Work Fastpath data */
        alignas(RTE_CACHE_LINE_SIZE) uint64_t xaq_lmt;
-       uint64_t *fc_mem;
+       uint64_t __rte_atomic *fc_mem;
        uintptr_t grp_base;
        /* Tx Fastpath data */
        alignas(RTE_CACHE_LINE_SIZE) uint64_t lso_tun_fmt;
@@ -154,7 +154,7 @@ struct __rte_cache_aligned cn9k_sso_hws_dual {
        struct cnxk_timesync_info **tstamp;
        /* Add Work Fastpath data */
        alignas(RTE_CACHE_LINE_SIZE) uint64_t xaq_lmt;
-       uint64_t *fc_mem;
+       uint64_t __rte_atomic *fc_mem;
        uintptr_t grp_base;
        /* Tx Fastpath data */
        alignas(RTE_CACHE_LINE_SIZE) uint64_t lso_tun_fmt;
diff --git a/drivers/event/cnxk/cnxk_eventdev_selftest.c b/drivers/event/cnxk/cnxk_eventdev_selftest.c
index 95c0f1b1f7..a4615c1356 100644
--- a/drivers/event/cnxk/cnxk_eventdev_selftest.c
+++ b/drivers/event/cnxk/cnxk_eventdev_selftest.c
@@ -63,7 +63,7 @@ seqn_list_update(int val)
                return -1;

        seqn_list[seqn_list_index++] = val;
-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
        return 0;
 }

@@ -82,7 +82,7 @@ seqn_list_check(int limit)
 }

 struct test_core_param {
-       uint32_t *total_events;
+       uint32_t __rte_atomic *total_events;
        uint64_t dequeue_tmo_ticks;
        uint8_t port;
        uint8_t sched_type;
@@ -540,13 +540,13 @@ static int
 worker_multi_port_fn(void *arg)
 {
        struct test_core_param *param = arg;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;
        uint8_t port = param->port;
        uint16_t valid_event;
        struct rte_event ev;
        int ret;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;
@@ -554,30 +554,30 @@ worker_multi_port_fn(void *arg)
                ret = validate_event(&ev);
                RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
                rte_pktmbuf_free(ev.mbuf);
-               __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed);
        }

        return 0;
 }

 static inline int
-wait_workers_to_join(const uint32_t *count)
+wait_workers_to_join(const uint32_t __rte_atomic *count)
 {
        uint64_t cycles, print_cycles;

        cycles = rte_get_timer_cycles();
        print_cycles = cycles;
-       while (__atomic_load_n(count, __ATOMIC_RELAXED)) {
+       while (rte_atomic_load_explicit(count, rte_memory_order_relaxed)) {
                uint64_t new_cycles = rte_get_timer_cycles();

                if (new_cycles - print_cycles > rte_get_timer_hz()) {
                        plt_info("Events %d",
-                                __atomic_load_n(count, __ATOMIC_RELAXED));
+                                rte_atomic_load_explicit(count, rte_memory_order_relaxed));
                        print_cycles = new_cycles;
                }
                if (new_cycles - cycles > rte_get_timer_hz() * 10000000000) {
                        plt_err("No schedules for seconds, deadlock (%d)",
-                               __atomic_load_n(count, __ATOMIC_RELAXED));
+                               rte_atomic_load_explicit(count, rte_memory_order_relaxed));
                        rte_event_dev_dump(evdev, stdout);
                        cycles = new_cycles;
                        return -1;
@@ -593,7 +593,7 @@ launch_workers_and_wait(int (*main_thread)(void *),
                        int (*worker_thread)(void *), uint32_t total_events,
                        uint8_t nb_workers, uint8_t sched_type)
 {
-       uint32_t atomic_total_events;
+       uint32_t __rte_atomic atomic_total_events;
        struct test_core_param *param;
        uint64_t dequeue_tmo_ticks;
        uint8_t port = 0;
@@ -603,7 +603,7 @@ launch_workers_and_wait(int (*main_thread)(void *),
        if (!nb_workers)
                return 0;

-       __atomic_store_n(&atomic_total_events, total_events, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&atomic_total_events, total_events, rte_memory_order_relaxed);
        seqn_list_init();

        param = malloc(sizeof(struct test_core_param) * nb_workers);
@@ -640,7 +640,7 @@ launch_workers_and_wait(int (*main_thread)(void *),
                param[port].sched_type = sched_type;
                param[port].port = port;
                param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
-               rte_atomic_thread_fence(__ATOMIC_RELEASE);
+               rte_atomic_thread_fence(rte_memory_order_release);
                w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
                if (w_lcore == RTE_MAX_LCORE) {
                        plt_err("Failed to get next available lcore");
@@ -651,7 +651,7 @@ launch_workers_and_wait(int (*main_thread)(void *),
                rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
        }

-       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+       rte_atomic_thread_fence(rte_memory_order_release);
        ret = wait_workers_to_join(&atomic_total_events);
        free(param);

@@ -890,13 +890,13 @@ worker_flow_based_pipeline(void *arg)
 {
        struct test_core_param *param = arg;
        uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;
        uint8_t new_sched_type = param->sched_type;
        uint8_t port = param->port;
        uint16_t valid_event;
        struct rte_event ev;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
                                                      dequeue_tmo_ticks);
                if (!valid_event)
@@ -916,8 +916,8 @@ worker_flow_based_pipeline(void *arg)

                        if (seqn_list_update(seqn) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
-                               __atomic_fetch_sub(total_events, 1,
-                                                  __ATOMIC_RELAXED);
+                               rte_atomic_fetch_sub_explicit(total_events, 1,
+                                                             rte_memory_order_relaxed);
                        } else {
                                plt_err("Failed to update seqn_list");
                                return -1;
@@ -1046,13 +1046,13 @@ worker_group_based_pipeline(void *arg)
 {
        struct test_core_param *param = arg;
        uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;
        uint8_t new_sched_type = param->sched_type;
        uint8_t port = param->port;
        uint16_t valid_event;
        struct rte_event ev;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
                                                      dequeue_tmo_ticks);
                if (!valid_event)
@@ -1072,8 +1072,8 @@ worker_group_based_pipeline(void *arg)

                        if (seqn_list_update(seqn) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
-                               __atomic_fetch_sub(total_events, 1,
-                                                  __ATOMIC_RELAXED);
+                               rte_atomic_fetch_sub_explicit(total_events, 1,
+                                                             rte_memory_order_relaxed);
                        } else {
                                plt_err("Failed to update seqn_list");
                                return -1;
@@ -1205,19 +1205,19 @@ static int
 worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
 {
        struct test_core_param *param = arg;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;
        uint8_t port = param->port;
        uint16_t valid_event;
        struct rte_event ev;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                if (ev.sub_event_type == MAX_STAGES) { /* last stage */
                        rte_pktmbuf_free(ev.mbuf);
-                       __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed);
                } else {
                        ev.event_type = RTE_EVENT_TYPE_CPU;
                        ev.sub_event_type++;
@@ -1284,16 +1284,16 @@ worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
                                       &queue_count),
                "Queue count get failed");
        uint8_t nr_queues = queue_count;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                if (ev.queue_id == nr_queues - 1) { /* last stage */
                        rte_pktmbuf_free(ev.mbuf);
-                       __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed);
                } else {
                        ev.event_type = RTE_EVENT_TYPE_CPU;
                        ev.queue_id++;
@@ -1329,16 +1329,16 @@ worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
                                       &queue_count),
                "Queue count get failed");
        uint8_t nr_queues = queue_count;
-       uint32_t *total_events = param->total_events;
+       uint32_t __rte_atomic *total_events = param->total_events;

-       while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+       while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) {
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
                if (!valid_event)
                        continue;

                if (ev.queue_id == nr_queues - 1) { /* Last stage */
                        rte_pktmbuf_free(ev.mbuf);
-                       __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed);
                } else {
                        ev.event_type = RTE_EVENT_TYPE_CPU;
                        ev.queue_id++;
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index bba70646fa..74a6da5070 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -358,7 +358,7 @@ cnxk_tim_stats_get(const struct rte_event_timer_adapter *adapter,
                tim_ring->tick_fn(tim_ring->tbase) - tim_ring->ring_start_cyc;

        stats->evtim_exp_count =
-               __atomic_load_n(&tim_ring->arm_cnt, __ATOMIC_RELAXED);
+               rte_atomic_load_explicit(&tim_ring->arm_cnt, rte_memory_order_relaxed);
        stats->ev_enq_count = stats->evtim_exp_count;
        stats->adapter_tick_count =
                rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div);
@@ -370,7 +370,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
 {
        struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;

-       __atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&tim_ring->arm_cnt, 0, rte_memory_order_relaxed);
        return 0;
 }

diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 6cf10dbf4d..f4c61dfb44 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -108,15 +108,15 @@ struct cnxk_tim_evdev {
 struct cnxk_tim_bkt {
        uint64_t first_chunk;
        union {
-               uint64_t w1;
+               uint64_t __rte_atomic w1;
                struct {
-                       uint32_t nb_entry;
+                       uint32_t __rte_atomic nb_entry;
                        uint8_t sbt : 1;
                        uint8_t hbt : 1;
                        uint8_t bsk : 1;
                        uint8_t rsvd : 5;
-                       uint8_t lock;
-                       int16_t chunk_remainder;
+                       uint8_t __rte_atomic lock;
+                       int16_t __rte_atomic chunk_remainder;
                };
        };
        uint64_t current_chunk;
@@ -134,7 +134,7 @@ struct __rte_cache_aligned cnxk_tim_ring {
        struct rte_reciprocal_u64 fast_div;
        struct rte_reciprocal_u64 fast_bkt;
        uint64_t tck_int;
-       uint64_t arm_cnt;
+       uint64_t __rte_atomic arm_cnt;
        uintptr_t base;
        uint8_t prod_type_sp;
        uint8_t enable_stats;
diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c
index 1f2f2fe5d8..db31f91818 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.c
+++ b/drivers/event/cnxk/cnxk_tim_worker.c
@@ -70,7 +70,7 @@ cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
        }

        if (flags & CNXK_TIM_ENA_STATS)
-               __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&tim_ring->arm_cnt, index, rte_memory_order_relaxed);

        return index;
 }
@@ -124,8 +124,8 @@ cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
        }

        if (flags & CNXK_TIM_ENA_STATS)
-               __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&tim_ring->arm_cnt, set_timers,
+                                             rte_memory_order_relaxed);

        return set_timers;
 }
@@ -151,7 +151,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
        int ret;

        RTE_SET_USED(adptr);
-       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       rte_atomic_thread_fence(rte_memory_order_acquire);
        for (index = 0; index < nb_timers; index++) {
                if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
                        rte_errno = EALREADY;
@@ -193,7 +193,7 @@ cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
                return -ENOENT;

        bkt = (struct cnxk_tim_bkt *)evtim->impl_opaque[1];
-       sema = __atomic_load_n(&bkt->w1, rte_memory_order_acquire);
+       sema = rte_atomic_load_explicit(&bkt->w1, rte_memory_order_acquire);
        if (cnxk_tim_bkt_get_hbt(sema) || !cnxk_tim_bkt_get_nent(sema))
                return -ENOENT;

diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index f530d8c5c4..e52eadbc08 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -23,19 +23,19 @@ cnxk_tim_bkt_fetch_rem(uint64_t w1)
 static inline int16_t
 cnxk_tim_bkt_get_rem(struct cnxk_tim_bkt *bktp)
 {
-       return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE);
+       return rte_atomic_load_explicit(&bktp->chunk_remainder, rte_memory_order_acquire);
 }

 static inline void
 cnxk_tim_bkt_set_rem(struct cnxk_tim_bkt *bktp, uint16_t v)
 {
-       __atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&bktp->chunk_remainder, v, rte_memory_order_relaxed);
 }

 static inline void
 cnxk_tim_bkt_sub_rem(struct cnxk_tim_bkt *bktp, uint16_t v)
 {
-       __atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v, rte_memory_order_relaxed);
 }

 static inline uint8_t
@@ -56,20 +56,20 @@ cnxk_tim_bkt_clr_bsk(struct cnxk_tim_bkt *bktp)
        /* Clear everything except lock. */
        const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;

-       return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+       return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
 }

 static inline uint64_t
 cnxk_tim_bkt_fetch_sema_lock(struct cnxk_tim_bkt *bktp)
 {
-       return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
-                                 __ATOMIC_ACQUIRE);
+       return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+                                            rte_memory_order_acquire);
 }

 static inline uint64_t
 cnxk_tim_bkt_fetch_sema(struct cnxk_tim_bkt *bktp)
 {
-       return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED);
+       return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA, rte_memory_order_relaxed);
 }

 static inline uint64_t
@@ -77,19 +77,19 @@ cnxk_tim_bkt_inc_lock(struct cnxk_tim_bkt *bktp)
 {
        const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;

-       return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE);
+       return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acquire);
 }

 static inline void
 cnxk_tim_bkt_dec_lock(struct cnxk_tim_bkt *bktp)
 {
-       __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE);
+       rte_atomic_fetch_sub_explicit(&bktp->lock, 1, rte_memory_order_release);
 }

 static inline void
 cnxk_tim_bkt_dec_lock_relaxed(struct cnxk_tim_bkt *bktp)
 {
-       __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&bktp->lock, 1, rte_memory_order_relaxed);
 }

 static inline uint32_t
@@ -102,19 +102,19 @@ cnxk_tim_bkt_get_nent(uint64_t w1)
 static inline void
 cnxk_tim_bkt_inc_nent(struct cnxk_tim_bkt *bktp)
 {
-       __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
 }

 static inline void
 cnxk_tim_bkt_add_nent_relaxed(struct cnxk_tim_bkt *bktp, uint32_t v)
 {
-       __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
 }

 static inline void
 cnxk_tim_bkt_add_nent(struct cnxk_tim_bkt *bktp, uint32_t v)
 {
-       __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELEASE);
+       rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_release);
 }

 static inline uint64_t
@@ -123,7 +123,7 @@ cnxk_tim_bkt_clr_nent(struct cnxk_tim_bkt *bktp)
        const uint64_t v =
                ~(TIM_BUCKET_W1_M_NUM_ENTRIES << TIM_BUCKET_W1_S_NUM_ENTRIES);

-       return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+       return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
 }

 static inline uint64_t
@@ -273,8 +273,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring,
                                     : "memory");
 #else
                        do {
-                               hbt_state = __atomic_load_n(&bkt->w1,
-                                                           __ATOMIC_RELAXED);
+                               hbt_state = rte_atomic_load_explicit(&bkt->w1,
+                                                                    rte_memory_order_relaxed);
                        } while (hbt_state & BIT_ULL(33));
 #endif

@@ -356,8 +356,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring,
                                     : "memory");
 #else
                        do {
-                               hbt_state = __atomic_load_n(&bkt->w1,
-                                                           __ATOMIC_RELAXED);
+                               hbt_state = rte_atomic_load_explicit(&bkt->w1,
+                                                                    rte_memory_order_relaxed);
                        } while (hbt_state & BIT_ULL(33));
 #endif

@@ -385,8 +385,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring,
                             : [crem] "r"(&bkt->w1)
                             : "memory");
 #else
-               while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
-                      0)
+               while (rte_atomic_load_explicit((int64_t __rte_atomic *)&bkt->w1,
+                                               rte_memory_order_relaxed) < 0)
                        ;
 #endif
                goto __retry;
@@ -408,15 +408,14 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring,
                *chunk = *pent;
                if (cnxk_tim_bkt_fetch_lock(lock_sema)) {
                        do {
-                               lock_sema = __atomic_load_n(&bkt->w1,
-                                                           __ATOMIC_RELAXED);
+                               lock_sema = rte_atomic_load_explicit(&bkt->w1,
+                                                                    rte_memory_order_relaxed);
                        } while (cnxk_tim_bkt_fetch_lock(lock_sema) - 1);
                }
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                mirr_bkt->current_chunk = (uintptr_t)chunk;
-               __atomic_store_n(&bkt->chunk_remainder,
-                                tim_ring->nb_chunk_slots - 1,
-                                __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&bkt->chunk_remainder, tim_ring->nb_chunk_slots - 1,
+                                         rte_memory_order_release);
        } else {
                chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk;
                chunk += tim_ring->nb_chunk_slots - rem;
@@ -489,8 +488,8 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring,
                                     : "memory");
 #else
                        do {
-                               hbt_state = __atomic_load_n(&bkt->w1,
-                                                           __ATOMIC_RELAXED);
+                               hbt_state = rte_atomic_load_explicit(&bkt->w1,
+                                                                    rte_memory_order_relaxed);
                        } while (hbt_state & BIT_ULL(33));
 #endif

@@ -521,7 +520,7 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring,
                             : [lock] "r"(&bkt->lock)
                             : "memory");
 #else
-               while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED))
+               while (rte_atomic_load_explicit(&bkt->lock, rte_memory_order_relaxed))
                        ;
 #endif
                goto __retry;
diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h
index 0e0d728ba4..3592344e04 100644
--- a/drivers/event/cnxk/cnxk_worker.h
+++ b/drivers/event/cnxk/cnxk_worker.h
@@ -33,7 +33,8 @@ cnxk_sso_hws_swtag_desched(uint32_t tag, uint8_t new_tt, uint16_t grp,
        uint64_t val;

        val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
-       __atomic_store_n((uint64_t *)swtag_desched_op, val, __ATOMIC_RELEASE);
+       rte_atomic_store_explicit((uint64_t __rte_atomic *)swtag_desched_op, val,
+                                 rte_memory_order_release);
 }

 static __rte_always_inline void
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 4933954c33..c0e649655d 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -11,7 +11,7 @@
 struct cn9k_eth_txq {
        uint64_t send_hdr_w0;
        int64_t fc_cache_pkts;
-       uint64_t *fc_mem;
+       uint64_t __rte_atomic *fc_mem;
        void *lmt_addr;
        rte_iova_t io_addr;
        uint64_t lso_tun_fmt;
--
2.25.1
