Add arch_try_cmpxchg64(), similar to arch_try_cmpxchg(), that
operates on 64-bit operands. The function provides the same
interface for 32-bit and 64-bit targets.
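
For illustration, the interface is meant to support the usual
try_cmpxchg-style update loop, where a failed exchange refreshes the
expected value instead of requiring an explicit re-read. A rough
sketch only, not part of this patch; the counter variable, the helper
and the generic try_cmpxchg64() wrapper used here are assumptions
based on the rest of this series:

#include <linux/atomic.h>
#include <linux/compiler.h>

static u64 my_counter;	/* hypothetical shared 64-bit value */

static void add_to_counter(u64 delta)
{
	u64 old = READ_ONCE(my_counter);

	/* On failure, 'old' is updated with the current value. */
	while (!try_cmpxchg64(&my_counter, &old, old + delta))
		;
}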

v2: Use correct #ifdef.

Signed-off-by: Uros Bizjak <ubiz...@gmail.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
---
 arch/x86/include/asm/cmpxchg_32.h | 64 ++++++++++++++++++++++++++-----
 arch/x86/include/asm/cmpxchg_64.h |  6 +++
 2 files changed, 60 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 0a7fe0321613..f3684c0413b1 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -35,15 +35,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
                     : "memory");
 }
 
-#ifdef CONFIG_X86_CMPXCHG64
-#define arch_cmpxchg64(ptr, o, n)                                      \
-       ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
-                                        (unsigned long long)(n)))
-#define arch_cmpxchg64_local(ptr, o, n)                                        \
-       ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
-                                              (unsigned long long)(n)))
-#endif
-
 static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
        u64 prev;
@@ -70,7 +61,40 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
        return prev;
 }
 
-#ifndef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CMPXCHG64
+#define arch_cmpxchg64(ptr, o, n)                                      \
+       ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+                                        (unsigned long long)(n)))
+#define arch_cmpxchg64_local(ptr, o, n)                                        \
+       ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+                                              (unsigned long long)(n)))
+
+#define __raw_try_cmpxchg64(_ptr, _pold, _new, lock)           \
+({                                                             \
+       bool success;                                           \
+       __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);      \
+       __typeof__(*(_ptr)) __old = *_old;                      \
+       __typeof__(*(_ptr)) __new = (_new);                     \
+       asm volatile(lock "cmpxchg8b %1"                        \
+                    CC_SET(z)                                  \
+                    : CC_OUT(z) (success),                     \
+                      "+m" (*_ptr),                            \
+                      "+A" (__old)                             \
+                    : "b" ((unsigned int)__new),               \
+                      "c" ((unsigned int)(__new>>32))          \
+                    : "memory");                               \
+       if (unlikely(!success))                                 \
+               *_old = __old;                                  \
+       likely(success);                                        \
+})
+
+#define __try_cmpxchg64(ptr, pold, new)                                \
+       __raw_try_cmpxchg64((ptr), (pold), (new), LOCK_PREFIX)
+
+#define arch_try_cmpxchg64(ptr, pold, new)                     \
+       __try_cmpxchg64((ptr), (pold), (new))
+
+#else
+
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
  * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
@@ -108,6 +132,26 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
                       : "memory");                             \
        __ret; })
 
+#define arch_try_cmpxchg64(ptr, po, n)                         \
+({                                                             \
+       bool success;                                           \
+       __typeof__(ptr) _old = (__typeof__(ptr))(po);           \
+       __typeof__(*(ptr)) __old = *_old;                       \
+       __typeof__(*(ptr)) __new = (n);                         \
+       alternative_io(LOCK_PREFIX_HERE                         \
+                       "call cmpxchg8b_emu",                   \
+                       "lock; cmpxchg8b (%%esi)" ,             \
+                      X86_FEATURE_CX8,                         \
+                      "+A" (__old),                            \
+                      "S" ((ptr)),                             \
+                      "b" ((unsigned int)__new),               \
+                      "c" ((unsigned int)(__new>>32))          \
+                      : "memory");                             \
+       success = (__old == *_old);                             \
+       if (unlikely(!success))                                 \
+               *_old = __old;                                  \
+       likely(success);                                        \
+})
 #endif
 
 #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 072e5459fe2f..250187ac8248 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -19,6 +19,12 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
        arch_cmpxchg_local((ptr), (o), (n));                            \
 })
 
+#define arch_try_cmpxchg64(ptr, po, n)                                 \
+({                                                                     \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       arch_try_cmpxchg((ptr), (po), (n));                             \
+})
+
 #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
-- 
2.26.2
