There is no need for a full rmb() barrier here; using smp_rmb() instead allows the faster lwsync instruction to be used.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/lib/locks.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 6440d5943c00..47a530de733e 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -30,7 +30,7 @@ void splpar_spin_yield(arch_spinlock_t *lock)
        yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
-       rmb();
+       smp_rmb();
        if (lock->slock != lock_value)
                return;         /* something has changed */
        plpar_hcall_norets(H_CONFER,
@@ -56,7 +56,7 @@ void splpar_rw_yield(arch_rwlock_t *rw)
        yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
-       rmb();
+       smp_rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
        plpar_hcall_norets(H_CONFER,
-- 
2.23.0

Reply via email to