On Thu, 2022-07-28 at 16:31 +1000, Nicholas Piggin wrote:

[resend as utf-8, not utf-7]

> Waiters spinning on the lock word should yield to the lock owner if the
> vCPU is preempted. This improves performance when the hypervisor has
> oversubscribed physical CPUs.
> ---
>  arch/powerpc/lib/qspinlock.c | 97 ++++++++++++++++++++++++++++++------
>  1 file changed, 83 insertions(+), 14 deletions(-)
>
> diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
> index aa26cfe21f18..55286ac91da5 100644
> --- a/arch/powerpc/lib/qspinlock.c
> +++ b/arch/powerpc/lib/qspinlock.c
> @@ -5,6 +5,7 @@
>  #include <linux/percpu.h>
>  #include <linux/smp.h>
>  #include <asm/qspinlock.h>
> +#include <asm/paravirt.h>
>
>  #define MAX_NODES	4
>
> @@ -24,14 +25,16 @@ static int STEAL_SPINS __read_mostly = (1<<5);
>  static bool MAYBE_STEALERS __read_mostly = true;
>  static int HEAD_SPINS __read_mostly = (1<<8);
>
> +static bool pv_yield_owner __read_mostly = true;
Why not macro case for this global, like STEAL_SPINS and HEAD_SPINS above?
The name alone also does not make it clear that this is a boolean. What
about pv_yield_owner_enabled?

> +
>  static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
>
> -static __always_inline int get_steal_spins(void)
> +static __always_inline int get_steal_spins(bool paravirt)
>  {
>  	return STEAL_SPINS;
>  }
>
> -static __always_inline int get_head_spins(void)
> +static __always_inline int get_head_spins(bool paravirt)
>  {
>  	return HEAD_SPINS;
>  }
> @@ -46,7 +49,11 @@ static inline int get_tail_cpu(u32 val)
>  	return (val >> _Q_TAIL_CPU_OFFSET) - 1;
>  }
>
> -/* Take the lock by setting the bit, no other CPUs may concurrently lock it. */
> +static inline int get_owner_cpu(u32 val)
> +{
> +	return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
> +}
> +
>  /* Take the lock by setting the lock bit, no other CPUs will touch it. */
>  static __always_inline void lock_set_locked(struct qspinlock *lock)
>  {
> @@ -180,7 +187,45 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
>  	BUG();
>  }
>
> -static inline bool try_to_steal_lock(struct qspinlock *lock)
> +static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)

This name doesn't seem right for the non-paravirt case, where it never
yields and only does cpu_relax().

> +{
> +	int owner;
> +	u32 yield_count;
> +
> +	BUG_ON(!(val & _Q_LOCKED_VAL));
> +
> +	if (!paravirt)
> +		goto relax;
> +
> +	if (!pv_yield_owner)
> +		goto relax;
> +
> +	owner = get_owner_cpu(val);
> +	yield_count = yield_count_of(owner);
> +
> +	if ((yield_count & 1) == 0)
> +		goto relax; /* owner vcpu is running */

I wonder why not use vcpu_is_preempted() here? Something like the sketch
below.
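(A sketch only, untested: IIUC, in the shared-processor case
vcpu_is_preempted() from <asm/paravirt.h> boils down to the same
(yield_count & 1) test, so the check could read as follows. yield_count
still has to be sampled explicitly, because yield_to_preempted() below
wants the value that was read before the lock word re-check.)

	owner = get_owner_cpu(val);
	yield_count = yield_count_of(owner);

	/* vcpu_is_preempted() tests the low bit of the yield count */
	if (!vcpu_is_preempted(owner))
		goto relax;	/* owner vcpu is running */

One wrinkle is that vcpu_is_preempted() re-reads the yield count
internally, so the explicitly sampled yield_count could be stale relative
to the test; maybe that is why the check is open-coded here.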
> +
> +	/*
> +	 * Read the lock word after sampling the yield count. On the other side
> +	 * there may a wmb because the yield count update is done by the
> +	 * hypervisor preemption and the value update by the OS, however this
> +	 * ordering might reduce the chance of out of order accesses and
> +	 * improve the heuristic.
> +	 */
> +	smp_rmb();
> +
> +	if (READ_ONCE(lock->val) == val) {
> +		yield_to_preempted(owner, yield_count);
> +		/* Don't relax if we yielded. Maybe we should? */
> +		return;
> +	}
> +relax:
> +	cpu_relax();
> +}
> +
> +
> +static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
>  {
>  	int iters;
>
> @@ -197,18 +242,18 @@ static inline bool try_to_steal_lock(struct qspinlock *lock)
>  			continue;
>  		}
>
> -		cpu_relax();
> +		yield_to_locked_owner(lock, val, paravirt);
>
>  		iters++;
>
> -		if (iters >= get_steal_spins())
> +		if (iters >= get_steal_spins(paravirt))
>  			break;
>  	}
>
>  	return false;
>  }
>
> -static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
> +static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
>  {
>  	struct qnodes *qnodesp;
>  	struct qnode *next, *node;
> @@ -260,7 +305,7 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
>  	if (!MAYBE_STEALERS) {
>  		/* We're at the head of the waitqueue, wait for the lock. */
>  		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
> -			cpu_relax();
> +			yield_to_locked_owner(lock, val, paravirt);
>
>  		/* If we're the last queued, must clean up the tail. */
>  		if ((val & _Q_TAIL_CPU_MASK) == tail) {
> @@ -278,10 +323,10 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
>  again:
>  		/* We're at the head of the waitqueue, wait for the lock. */
>  		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
> -			cpu_relax();
> +			yield_to_locked_owner(lock, val, paravirt);
>
>  			iters++;
> -			if (!set_mustq && iters >= get_head_spins()) {
> +			if (!set_mustq && iters >= get_head_spins(paravirt)) {
>  				set_mustq = true;
>  				lock_set_mustq(lock);
>  				val |= _Q_MUST_Q_VAL;
> @@ -320,10 +365,15 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
>
>  void queued_spin_lock_slowpath(struct qspinlock *lock)
>  {
> -	if (try_to_steal_lock(lock))
> -		return;
> -
> -	queued_spin_lock_mcs_queue(lock);
> +	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
> +		if (try_to_steal_lock(lock, true))
> +			return;
> +		queued_spin_lock_mcs_queue(lock, true);
> +	} else {
> +		if (try_to_steal_lock(lock, false))
> +			return;
> +		queued_spin_lock_mcs_queue(lock, false);
> +	}
>  }

There is not really a need for a conditional here:

	bool paravirt = IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
			is_shared_processor();

	if (try_to_steal_lock(lock, paravirt))
		return;

	queued_spin_lock_mcs_queue(lock, paravirt);

Also, the paravirt parameter passed around by the various functions seems
to always be equivalent to (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
is_shared_processor()). Would it be simpler to test that condition (via a
helper function) inside those functions instead of passing it as a
parameter? See the sketch at the end of this mail.

>  EXPORT_SYMBOL(queued_spin_lock_slowpath);
>
> @@ -382,10 +432,29 @@ static int head_spins_get(void *data, u64 *val)
>
>  DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");
>
> +static int pv_yield_owner_set(void *data, u64 val)
> +{
> +	pv_yield_owner = !!val;
> +
> +	return 0;
> +}
> +
> +static int pv_yield_owner_get(void *data, u64 *val)
> +{
> +	*val = pv_yield_owner;
> +
> +	return 0;
> +}
> +
> +DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
> +
>  static __init int spinlock_debugfs_init(void)
>  {
>  	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
>  	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
> +	if (is_shared_processor()) {
> +		debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
> +	}
>
>  	return 0;
>  }
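For the helper function idea, I mean something like this (a sketch only;
use_pv_spinlock() is a name I made up, nothing in the tree defines it):

	/* Hypothetical helper, name invented for illustration. */
	static __always_inline bool use_pv_spinlock(void)
	{
		return IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
		       is_shared_processor();
	}

so that e.g. yield_to_locked_owner() would start with:

	if (!use_pv_spinlock())
		goto relax;

is_shared_processor() is a static key test, so this should be cheap. The
trade-off I can see: in the patch as written, the if/else in
queued_spin_lock_slowpath() makes paravirt a compile-time constant at each
call site, so the compiler can emit separate paravirt and non-paravirt
copies of the inlined slow path, while both the single-conditional version
and the helper would replace that with a runtime branch in a single copy.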
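FWIW, since arch_debugfs_dir is the "powerpc" directory under debugfs, the
new knob should show up as /sys/kernel/debug/powerpc/qspl_pv_yield_owner,
and writing 0 to it disables the yield-to-owner behaviour at runtime.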