Convert rte_atomic usages to compiler atomic built-ins for lcore
synchronization in the ticketlock test cases.
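
For reference, a minimal sketch of the lcore synchronization pattern this
patch switches to (illustrative only, not part of the change; function names
are hypothetical and it assumes <rte_pause.h> for rte_wait_until_equal_32()):

    #include <stdint.h>
    #include <rte_pause.h>

    static uint32_t synchro;

    /* worker lcore: spin until the main lcore signals start */
    static void
    worker_wait(void)
    {
            rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
    }

    /* main lcore: release the waiting workers */
    static void
    main_signal(void)
    {
            __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
    }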

Signed-off-by: Joyce Kong <joyce.k...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Acked-by: Stephen Hemminger <step...@networkplumber.org>
---
 app/test/test_ticketlock.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 7aab8665bc..242c136478 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -9,7 +9,6 @@
 #include <sys/queue.h>
 #include <unistd.h>
 
-#include <rte_atomic.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_eal.h>
@@ -49,7 +48,7 @@ static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
 static rte_ticketlock_recursive_t tlr;
 static unsigned int count;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
 test_ticketlock_per_core(__rte_unused void *arg)
@@ -112,8 +111,7 @@ load_loop_fn(void *func_param)
 
        /* wait synchro for workers */
        if (lcore != rte_get_main_lcore())
-               while (rte_atomic32_read(&synchro) == 0)
-                       ;
+               rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
 
        begin = rte_rdtsc_precise();
        while (lcore_count[lcore] < MAX_LOOP) {
@@ -155,11 +153,11 @@ test_ticketlock_perf(void)
        printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
        /* Clear synchro and start workers */
-       rte_atomic32_set(&synchro, 0);
+       __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
        rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
        /* start synchro and launch test on main */
-       rte_atomic32_set(&synchro, 1);
+       __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
        load_loop_fn(&lock);
 
        rte_eal_mp_wait_lcore();
-- 
2.17.1
