Instead of busy-polling on the mcslock until it is updated, use the
wait-event scheme for this case.

Signed-off-by: Feifei Wang <feifei.wa...@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
---
 lib/eal/include/generic/rte_mcslock.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/lib/eal/include/generic/rte_mcslock.h 
b/lib/eal/include/generic/rte_mcslock.h
index 9f323bd2a2..c8d1c4f38f 100644
--- a/lib/eal/include/generic/rte_mcslock.h
+++ b/lib/eal/include/generic/rte_mcslock.h
@@ -84,8 +84,7 @@ rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
         * to spin on me->locked until the previous lock holder resets
         * the me->locked using mcslock_unlock().
         */
-       while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
-               rte_pause();
+       rte_wait_event_32(&me->locked, INT_MAX, 0, !=, __ATOMIC_ACQUIRE);
 }
 
 /**
@@ -117,8 +116,13 @@ rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
                /* More nodes added to the queue by other CPUs.
                 * Wait until the next pointer is set.
                 */
-               while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
-                       rte_pause();
+#ifdef RTE_ARCH_32
+               rte_wait_event_32((uint32_t *)&me->next, UINT_MAX, 0, ==,
+                               __ATOMIC_RELAXED);
+#else
+               rte_wait_event_64((uint64_t *)&me->next, ULONG_MAX, 0, ==,
+                               __ATOMIC_RELAXED);
+#endif
        }
 
        /* Pass lock to next waiter. */
-- 
2.25.1

Reply via email to