Use the rte_atomic_thread_fence() wrapper, which is provided for the
__atomic_thread_fence builtin, to allow an optimized implementation of
the __ATOMIC_SEQ_CST memory order on x86 platforms.

Signed-off-by: Joyce Kong <joyce.k...@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
---
 lib/librte_rcu/rte_rcu_qsbr.h | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/lib/librte_rcu/rte_rcu_qsbr.h b/lib/librte_rcu/rte_rcu_qsbr.h
index fa2b881bd..18811c1cc 100644
--- a/lib/librte_rcu/rte_rcu_qsbr.h
+++ b/lib/librte_rcu/rte_rcu_qsbr.h
@@ -329,12 +329,7 @@ rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, 
unsigned int thread_id)
         * writer might not see that the reader is online, even though
         * the reader is referencing the shared data structure.
         */
-#ifdef RTE_ARCH_X86_64
-       /* rte_smp_mb() for x86 is lighter */
-       rte_smp_mb();
-#else
-       __atomic_thread_fence(__ATOMIC_SEQ_CST);
-#endif
+       rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 }
 
 /**
-- 
2.29.2

Reply via email to