From: Peter Zijlstra <pet...@infradead.org>

When we detect a hypervisor (and paravirt qspinlock support is not in
use, see the qspinlock paravirt support patches), fall back to a simple
test-and-set lock to avoid the horrors of queue preemption: a vCPU that
gets preempted while queued in the MCS chain stalls every waiter behind it.
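
For anyone unfamiliar with the shape of that fallback, below is a minimal
user-space sketch of a test-and-set lock built on C11 atomics. The struct
and function names (tas_lock, tas_lock_acquire, tas_lock_release) are made
up for illustration and are not the kernel API; the kernel version in this
patch operates on the qspinlock word via atomic_cmpxchg() and cpu_relax().

#include <stdatomic.h>

struct tas_lock {
	atomic_int val;			/* 0 == unlocked, 1 == locked */
};

static inline void tas_lock_acquire(struct tas_lock *lock)
{
	int unlocked = 0;

	/*
	 * Spin until we flip 0 -> 1.  compare_exchange rewrites
	 * 'unlocked' with the observed value on failure, so reset it
	 * before retrying.
	 */
	while (!atomic_compare_exchange_weak_explicit(&lock->val, &unlocked, 1,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		unlocked = 0;
		__builtin_ia32_pause();	/* x86 PAUSE, roughly cpu_relax() */
	}
}

static inline void tas_lock_release(struct tas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

Because every waiter independently retries the cmpxchg, a preempted vCPU
only loses its own progress; it cannot wedge a whole queue of waiters
behind it the way a preempted MCS node owner can.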

Cc: Ingo Molnar <mi...@redhat.com>
Cc: David Vrabel <david.vra...@citrix.com>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Scott J Norton <scott.nor...@hp.com>
Cc: Paolo Bonzini <paolo.bonz...@gmail.com>
Cc: Douglas Hatch <doug.ha...@hp.com>
Cc: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>
Cc: Boris Ostrovsky <boris.ostrov...@oracle.com>
Cc: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: Raghavendra K T <raghavendra...@linux.vnet.ibm.com>
Signed-off-by: Waiman Long <waiman.l...@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Link: http://lkml.kernel.org/r/1421784755-21945-8-git-send-email-waiman.l...@hp.com
---
 arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
 include/asm-generic/qspinlock.h  |    7 +++++++
 kernel/locking/qspinlock.c       |    3 +++
 3 files changed, 24 insertions(+)

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define        queue_spin_unlock queue_spin_unlock
@@ -15,6 +16,19 @@ static inline void queue_spin_unlock(str
        smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+               return false;
+
+       while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+               cpu_relax();
+
+       return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queue_spin_unlock_wai
                cpu_relax();
 }
 
+#ifndef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+       return false;
+}
+#endif
+
 /*
  * Initializier
  */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -259,6 +259,9 @@ void queue_spin_lock_slowpath(struct qsp
 
        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+       if (virt_queue_spin_lock(lock))
+               return;
+
        /*
         * wait for in-progress pending->locked hand-overs
         *


