Commit-ID:  2aa79af64263190eec610422b07f60e99a7d230a
Gitweb:     http://git.kernel.org/tip/2aa79af64263190eec610422b07f60e99a7d230a
Author:     Peter Zijlstra (Intel) <pet...@infradead.org>
AuthorDate: Fri, 24 Apr 2015 14:56:36 -0400
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 8 May 2015 12:36:58 +0200
locking/qspinlock: Revert to test-and-set on hypervisors

When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors of
queue preemption.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Waiman Long <waiman.l...@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrov...@oracle.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Daniel J Blueman <dan...@numascale.com>
Cc: David Vrabel <david.vra...@citrix.com>
Cc: Douglas Hatch <doug.ha...@hp.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Paolo Bonzini <paolo.bonz...@gmail.com>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Raghavendra K T <raghavendra...@linux.vnet.ibm.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: Scott J Norton <scott.nor...@hp.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: virtualizat...@lists.linux-foundation.org
Cc: xen-de...@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-8-git-send-email-waiman.l...@hp.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/qspinlock.h | 14 ++++++++++++++
 include/asm-generic/qspinlock.h  |  7 +++++++
 kernel/locking/qspinlock.c       |  3 +++
 3 files changed, 24 insertions(+)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index e2aee82..f079b70 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 569abcd..83bfb87 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
 /*
  * Initializier
  */
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 0338721..fd31a47 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (virt_queued_spin_lock(lock))
+		return;
+
 	/*
 	 * wait for in-progress pending->locked hand-overs
 	 *
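For context, the fallback the patch takes under a hypervisor is an ordinary
test-and-set spinlock: spin on cmpxchg(0 -> _Q_LOCKED_VAL) until it succeeds,
and unlock with a release store of 0. Below is a minimal standalone userspace
sketch of that same idea, using C11 atomics in place of the kernel's atomic_t
API, static_cpu_has() check and cpu_relax(); the names (tas_lock,
tas_lock_acquire, tas_lock_release) are illustrative only and do not appear in
the patch.

	/* Userspace sketch of a test-and-set lock; not kernel code. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define Q_LOCKED_VAL 1		/* stand-in for _Q_LOCKED_VAL */

	struct tas_lock {
		atomic_int val;		/* 0 = unlocked, 1 = locked */
	};

	static void tas_lock_acquire(struct tas_lock *lock)
	{
		int expected;

		/* Spin on cmpxchg(0 -> LOCKED), as virt_queued_spin_lock() does. */
		do {
			expected = 0;
		} while (!atomic_compare_exchange_weak_explicit(&lock->val,
								&expected,
								Q_LOCKED_VAL,
								memory_order_acquire,
								memory_order_relaxed));
	}

	static void tas_lock_release(struct tas_lock *lock)
	{
		/* Release store of 0, mirroring the smp_store_release() unlock. */
		atomic_store_explicit(&lock->val, 0, memory_order_release);
	}

	int main(void)
	{
		struct tas_lock lock = { .val = 0 };

		tas_lock_acquire(&lock);
		printf("locked, val=%d\n", atomic_load(&lock.val));
		tas_lock_release(&lock);
		printf("unlocked, val=%d\n", atomic_load(&lock.val));
		return 0;
	}

Unlike the queued (MCS-style) slowpath, this lock has no waiter queue, so a
preempted vCPU cannot block a chain of queued waiters; the trade-off is that
it is unfair and generates more cache-line contention, which is exactly why it
is used only when X86_FEATURE_HYPERVISOR is detected.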