The Alpha Architecture Reference Manual states that any memory access performed between an LD_xL and a STx_C instruction may cause the store-conditional to fail unconditionally and, as such, "no useful program should do this".
Linux is a useful program, so fix up the Alpha spinlock implementation to use logical operations rather than load-address instructions for generating immediates. Cc: Richard Henderson <r...@twiddle.net> Cc: Ivan Kokshaysky <i...@jurassic.park.msu.ru> Cc: Matt Turner <matts...@gmail.com> Signed-off-by: Will Deacon <will.dea...@arm.com> --- arch/alpha/include/asm/spinlock.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index 3bba21e..0c357cd 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -29,7 +29,7 @@ static inline void arch_spin_lock(arch_spinlock_t * lock) __asm__ __volatile__( "1: ldl_l %0,%1\n" " bne %0,2f\n" - " lda %0,1\n" + " mov 1,%0\n" " stl_c %0,%1\n" " beq %0,2f\n" " mb\n" @@ -86,7 +86,7 @@ static inline void arch_write_lock(arch_rwlock_t *lock) __asm__ __volatile__( "1: ldl_l %1,%0\n" " bne %1,6f\n" - " lda %1,1\n" + " mov 1,%1\n" " stl_c %1,%0\n" " beq %1,6f\n" " mb\n" @@ -106,7 +106,7 @@ static inline int arch_read_trylock(arch_rwlock_t * lock) __asm__ __volatile__( "1: ldl_l %1,%0\n" - " lda %2,0\n" + " mov 0,%2\n" " blbs %1,2f\n" " subl %1,2,%2\n" " stl_c %2,%0\n" @@ -128,9 +128,9 @@ static inline int arch_write_trylock(arch_rwlock_t * lock) __asm__ __volatile__( "1: ldl_l %1,%0\n" - " lda %2,0\n" + " mov 0,%2\n" " bne %1,2f\n" - " lda %2,1\n" + " mov 1,%2\n" " stl_c %2,%0\n" " beq %2,6f\n" "2: mb\n" -- 1.8.2.2 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/