Give the queue head the ability to stop lock stealers. After a number of
spins without successfully acquiring the lock, the queue head sets the new
must-queue bit in the lock word, which stops stealing and ensures the head
will be the next owner.
---
 arch/powerpc/include/asm/qspinlock_types.h | 10 ++++-
 arch/powerpc/lib/qspinlock.c               | 45 +++++++++++++++++++++-
 2 files changed, 52 insertions(+), 3 deletions(-)
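For illustration only (not part of the patch): a minimal user-space sketch of
the must-queue idea, using C11 atomics in place of the kernel's lwarx/stwcx.
sequences and READ_ONCE(). The bit values mirror the defines in the diff
below, but the helper names, the compare-and-swap trylock, and main() are
made up for the example.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCKED_VAL	(1u << 0)	/* mirrors _Q_LOCKED_VAL */
#define MUST_Q_VAL	(1u << 16)	/* mirrors _Q_MUST_Q_VAL */
#define STEAL_SPINS	(1 << 5)
#define HEAD_SPINS	(1 << 13)

/* A would-be stealer backs off as soon as the head has set the must-queue bit. */
bool try_to_steal(_Atomic uint32_t *lock)
{
	for (int iters = 0; iters < STEAL_SPINS; iters++) {
		uint32_t val = atomic_load_explicit(lock, memory_order_relaxed);

		if (val & MUST_Q_VAL)
			break;		/* head insists on being next: go queue up */
		if (val & LOCKED_VAL)
			continue;	/* still held: keep spinning */
		/* Lock looks free: try to take it without queueing. */
		if (atomic_compare_exchange_strong(lock, &val, val | LOCKED_VAL))
			return true;
	}
	return false;
}

/* The queue head spins on the lock bit; past HEAD_SPINS it fences stealers off. */
void head_wait(_Atomic uint32_t *lock)
{
	int iters = 0;

	while (atomic_load_explicit(lock, memory_order_relaxed) & LOCKED_VAL) {
		if (iters++ == HEAD_SPINS)
			atomic_fetch_or(lock, MUST_Q_VAL);	/* like lock_set_mustq() */
	}
}

int main(void)
{
	_Atomic uint32_t lock = 0;

	/* Free lock, no must-queue bit: stealing succeeds. */
	assert(try_to_steal(&lock));

	/* Must-queue bit set: stealing is refused even though the lock is free. */
	atomic_store(&lock, MUST_Q_VAL);
	assert(!try_to_steal(&lock));

	return 0;
}

The point is the ordering: once the head publishes the must-queue bit, new
stealers refuse the lock, so when the current owner releases it the head
should be the only waiter trying to set the locked bit.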
diff --git a/arch/powerpc/include/asm/qspinlock_types.h b/arch/powerpc/include/asm/qspinlock_types.h
index 210adf05b235..8b20f5e22bba 100644
--- a/arch/powerpc/include/asm/qspinlock_types.h
+++ b/arch/powerpc/include/asm/qspinlock_types.h
@@ -29,7 +29,8 @@ typedef struct qspinlock {
  * Bitfields in the lock word:
  *
  *     0: locked bit
- * 16-31: tail cpu (+1)
+ *    16: must queue bit
+ * 17-31: tail cpu (+1)
  */
 #define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
 				      << _Q_ ## type ## _OFFSET)
@@ -38,7 +39,12 @@ typedef struct qspinlock {
 #define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 
-#define _Q_TAIL_CPU_OFFSET	16
+#define _Q_MUST_Q_OFFSET	16
+#define _Q_MUST_Q_BITS		1
+#define _Q_MUST_Q_MASK		_Q_SET_MASK(MUST_Q)
+#define _Q_MUST_Q_VAL		(1U << _Q_MUST_Q_OFFSET)
+
+#define _Q_TAIL_CPU_OFFSET	17
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
 
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index cb87991602ff..662a744fa1ee 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -22,6 +22,7 @@ struct qnodes {
 /* Tuning parameters */
 static int STEAL_SPINS __read_mostly = (1<<5);
 static bool MAYBE_STEALERS __read_mostly = true;
+static int HEAD_SPINS __read_mostly = (1<<13);
 
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
 
@@ -137,6 +138,23 @@ static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
 	return prev;
 }
 
+static __always_inline u32 lock_set_mustq(struct qspinlock *lock)
+{
+	u32 new = _Q_MUST_Q_VAL;
+	u32 prev;
+
+	asm volatile(
+"1:	lwarx	%0,0,%1		# queued_spin_set_mustq			\n"
+"	or	%0,%0,%2						\n"
+"	stwcx.	%0,0,%1							\n"
+"	bne-	1b							\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r" (new)
+	: "cr0", "memory");
+
+	return prev;
+}
+
 static inline struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 {
 	int cpu = get_tail_cpu(val);
@@ -160,6 +178,9 @@ static inline bool try_to_steal_lock(struct qspinlock *lock)
 	for (iters = 0; iters < STEAL_SPINS; iters++) {
 		u32 val = READ_ONCE(lock->val);
 
+		if (val & _Q_MUST_Q_VAL)
+			break;
+
 		if (val & _Q_LOCKED_VAL) {
 			cpu_relax();
 			continue;
@@ -236,10 +257,14 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 		/* We must be the owner, just set the lock bit and acquire */
 		lock_set_locked(lock);
 	} else {
+		int iters = 0;
 again:
 		/* We're at the head of the waitqueue, wait for the lock. */
-		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
+		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
+			if (iters++ == HEAD_SPINS)
+				lock_set_mustq(lock);
 			cpu_relax();
+		}
 
 		/* If we're the last queued, must clean up the tail. */
 		if ((val & _Q_TAIL_CPU_MASK) == tail) {
@@ -284,6 +309,7 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
 void pv_spinlocks_init(void)
 {
 	STEAL_SPINS = (1<<15);
+	HEAD_SPINS = (1<<13);
 }
 
 #endif
@@ -320,9 +346,26 @@ static int steal_spins_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
 
+static int head_spins_set(void *data, u64 val)
+{
+	HEAD_SPINS = val;
+
+	return 0;
+}
+
+static int head_spins_get(void *data, u64 *val)
+{
+	*val = HEAD_SPINS;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");
+
 static __init int spinlock_debugfs_init(void)
 {
 	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
 
 	return 0;
 }
-- 
2.35.1