Rohan McLure <rmcl...@linux.ibm.com> writes:
> asm-generic/qspinlock.h provides an identical implementation of
> queued_spin_lock. Remove the variant in asm/qspinlock.h.
This code has changed recently, so this patch no longer applies.

See 9f61521c7a28 ("powerpc/qspinlock: powerpc qspinlock implementation")
in powerpc/next.

cheers

> diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
> index b676c4fb90fd..bf5ba0f00258 100644
> --- a/arch/powerpc/include/asm/qspinlock.h
> +++ b/arch/powerpc/include/asm/qspinlock.h
> @@ -33,17 +33,6 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
>  extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>  #endif
>
> -static __always_inline void queued_spin_lock(struct qspinlock *lock)
> -{
> -	u32 val = 0;
> -
> -	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
> -		return;
> -
> -	queued_spin_lock_slowpath(lock, val);
> -}
> -#define queued_spin_lock queued_spin_lock
> -
>  #ifdef CONFIG_PARAVIRT_SPINLOCKS
>  #define SPIN_THRESHOLD (1<<15) /* not tuned */
>
> --
> 2.37.2