This gives trylock slightly more strength, and it also gives most of
the benefit of passing 'val' back through the slowpath without the
complexity.
---
 arch/powerpc/include/asm/qspinlock.h | 39 +++++++++++++++++++++++++++-
 arch/powerpc/lib/qspinlock.c         |  9 +++++++
 2 files changed, 47 insertions(+), 1 deletion(-)
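A rough standalone C11 sketch (reviewer note, not part of the patch) of the
semantic difference between the two variants: __queued_spin_trylock_nosteal()
fails whenever the lock word is non-zero (locked or queued waiters), while
__queued_spin_trylock_steal() fails only when the locked bit is set, so it can
take the lock ahead of queued waiters. The struct, constant values and the
cmpxchg-based encoding below are illustrative only; the patch itself uses
lwarx/stwcx. inline asm on the kernel's qspinlock word layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_VAL	0x1u		/* illustrative locked bit */
#define Q_TAIL_CPU_MASK	0xffff0000u	/* illustrative tail (queue) field */

struct qspinlock_sketch {
	_Atomic uint32_t val;
};

/* Fails if the lock word is anything but 0: locked OR waiters queued. */
static bool trylock_nosteal(struct qspinlock_sketch *lock)
{
	uint32_t old = 0;

	return atomic_compare_exchange_strong(&lock->val, &old, Q_LOCKED_VAL);
}

/* Fails only if the locked bit is set; a non-empty queue does not stop it. */
static bool trylock_steal(struct qspinlock_sketch *lock)
{
	uint32_t old = atomic_load(&lock->val);

	while (!(old & ~Q_TAIL_CPU_MASK)) {	/* unlocked; tail may be set */
		uint32_t new = (old & Q_TAIL_CPU_MASK) | Q_LOCKED_VAL;

		/* on failure, 'old' is reloaded and the loop re-checks */
		if (atomic_compare_exchange_weak(&lock->val, &old, new))
			return true;	/* took the lock ahead of the queue */
	}
	return false;
}

int main(void)
{
	/* Unlocked, but with a queued waiter encoded in the tail field. */
	struct qspinlock_sketch lock = { .val = 0x00010000u };

	printf("nosteal: %d\n", trylock_nosteal(&lock));	/* 0: queue present */
	printf("steal:   %d\n", trylock_steal(&lock));		/* 1: locked bit clear */
	return 0;
}

Built with any C11 compiler, this prints "nosteal: 0" then "steal: 1" for a
word that has a queued waiter but no owner, which is the case the patch cares
about.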
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 44601b261e08..d3d2039237b2 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -5,6 +5,8 @@
 #include <linux/compiler.h>
 #include <asm/qspinlock_types.h>
 
+#define _Q_SPIN_TRY_LOCK_STEAL 1
+
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	return READ_ONCE(lock->val);
@@ -26,11 +28,12 @@ static __always_inline u32 queued_spin_get_locked_val(void)
 	return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
 }
 
-static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock)
 {
 	u32 new = queued_spin_get_locked_val();
 	u32 prev;
 
+	/* Trylock succeeds only when unlocked and no queued nodes */
 	asm volatile(
 "1:	lwarx	%0,0,%1,%3	# queued_spin_trylock			\n"
 "	cmpwi	0,%0,0							\n"
@@ -49,6 +52,40 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 	return 0;
 }
 
+static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
+{
+	u32 new = queued_spin_get_locked_val();
+	u32 prev, tmp;
+
+	/* Trylock may get ahead of queued nodes if it finds unlocked */
+	asm volatile(
+"1:	lwarx	%0,0,%2,%5	# queued_spin_trylock			\n"
+"	andc.	%1,%0,%4						\n"
+"	bne-	2f							\n"
+"	and	%1,%0,%4						\n"
+"	or	%1,%1,%3						\n"
+"	stwcx.	%1,0,%2							\n"
+"	bne-	1b							\n"
+"\t" PPC_ACQUIRE_BARRIER "						\n"
+"2:									\n"
+	: "=&r" (prev), "=&r" (tmp)
+	: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
+	  "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+	: "cr0", "memory");
+
+	if (likely(!(prev & ~_Q_TAIL_CPU_MASK)))
+		return 1;
+	return 0;
+}
+
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+	if (!_Q_SPIN_TRY_LOCK_STEAL)
+		return __queued_spin_trylock_nosteal(lock);
+	else
+		return __queued_spin_trylock_steal(lock);
+}
+
 void queued_spin_lock_slowpath(struct qspinlock *lock);
 
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index ce0563c56915..d67b923e4f98 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -24,7 +24,11 @@ struct qnodes {
 
 /* Tuning parameters */
 static int STEAL_SPINS __read_mostly = (1<<5);
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+static const bool MAYBE_STEALERS = true;
+#else
 static bool MAYBE_STEALERS __read_mostly = true;
+#endif
 static int HEAD_SPINS __read_mostly = (1<<13);
 
 static bool pv_yield_owner __read_mostly = true;
@@ -505,6 +509,10 @@ void pv_spinlocks_init(void)
 #include <linux/debugfs.h>
 static int steal_spins_set(void *data, u64 val)
 {
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+	/* MAYBE_STEALERS remains true */
+	STEAL_SPINS = val;
+#else
 	static DEFINE_MUTEX(lock);
 
 	mutex_lock(&lock);
@@ -522,6 +530,7 @@ static int steal_spins_set(void *data, u64 val)
 		STEAL_SPINS = val;
 	}
 	mutex_unlock(&lock);
+#endif
 
 	return 0;
 }
-- 
2.35.1