Commit-ID:  08645077b7f9f7824dbaf1959b0e014a894c8acc
Gitweb:     http://git.kernel.org/tip/08645077b7f9f7824dbaf1959b0e014a894c8acc
Author:     Nikolay Borisov <n.borisov.l...@gmail.com>
AuthorDate: Mon, 26 Sep 2016 21:11:18 +0300
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 30 Sep 2016 10:56:01 +0200
x86/cmpxchg, locking/atomics: Remove superfluous definitions

cmpxchg.h contains definitions for the (x)add_* operations, dating back
to the original ticket spinlock implementation. Nowadays these are
unused, so remove them.

Signed-off-by: Nikolay Borisov <n.borisov.l...@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: h...@zytor.com
Link: http://lkml.kernel.org/r/1474913478-17757-1-git-send-email-n.borisov.l...@gmail.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/cmpxchg.h | 44 ------------------------------------------
 1 file changed, 44 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 9733361..97848cd 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -158,53 +158,9 @@ extern void __add_wrong_size(void)
  * value of "*ptr".
  *
  * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
  */
 #define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
-
-#define __add(ptr, inc, lock)						\
-	({								\
-	        __typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "addb %b1, %0\n"		\
-				      : "+m" (*(ptr)) : "qi" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "addw %w1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "addl %1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "addq %1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		default:						\
-			__add_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
-/*
- * add_*() adds "inc" to "*ptr"
- *
- * __add() takes a lock prefix
- * add_smp() is locked when multiple CPUs are online
- * add_sync() is always locked
- */
-#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
-#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
 ({									\
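
[ Editor's note, not part of the patch: for readers unfamiliar with the
  surviving xadd(), it expands to a LOCK-prefixed xadd instruction sized
  to its operand and returns the previous value of *ptr. Below is a
  minimal user-space sketch of the 32-bit case; xadd_int() and the
  hard-coded "lock; " prefix are illustrative stand-ins for the kernel's
  __xchg_op()/LOCK_PREFIX machinery. The removed xadd_sync() and
  xadd_local() variants differed only in forcing the prefix to "lock; "
  or "", respectively. ]

#include <stdio.h>

/* Roughly what xadd() on a 32-bit operand boils down to. */
static inline int xadd_int(int *ptr, int inc)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory", "cc");
	return inc;			/* previous value of *ptr */
}

int main(void)
{
	int counter = 5;
	int old = xadd_int(&counter, 3);

	printf("old=%d new=%d\n", old, counter);	/* old=5 new=8 */
	return 0;
}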