Simply replace the rte_smp barrier with an atomic thread fence.

Signed-off-by: Phil Yang <phil.y...@arm.com>
Signed-off-by: Feifei Wang <feifei.wa...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
---
 app/test-eventdev/test_perf_common.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/app/test-eventdev/test_perf_common.h 
b/app/test-eventdev/test_perf_common.h
index e7233e5a5..9785dc3e2 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -98,11 +98,11 @@ perf_process_last_stage(struct rte_mempool *const pool,
 {
        bufs[count++] = ev->event_ptr;
 
-       /* wmb here ensures event_prt is stored before
-        * updating the number of processed packets
-        * for worker lcores
+       /* release fence here ensures event_ptr is
+        * stored before updating the number of
+        * processed packets for worker lcores
         */
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;
 
        if (unlikely(count == buf_sz)) {
@@ -122,11 +122,11 @@ perf_process_last_stage_latency(struct rte_mempool *const 
pool,
 
        bufs[count++] = ev->event_ptr;
 
-       /* wmb here ensures event_prt is stored before
-        * updating the number of processed packets
-        * for worker lcores
+       /* release fence here ensures event_ptr is
+        * stored before updating the number of
+        * processed packets for worker lcores
         */
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;
 
        if (unlikely(count == buf_sz)) {
-- 
2.17.1

Reply via email to