Turn cmpxchg64() and similar functions into inline C++ template functions.
This produces more robust source, as all the casting that the C macros
require is then unnecessary.

Signed-off-by: David Howells <dhowe...@redhat.com>
---

 arch/x86/include/asm/cmpxchg_64.h |   28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg_64.h 
b/arch/x86/include/asm/cmpxchg_64.h
index 03cad196a301..55beb7fc036d 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 -*- c++ -*- */
 #ifndef _ASM_X86_CMPXCHG_64_H
 #define _ASM_X86_CMPXCHG_64_H
 
@@ -7,17 +7,23 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
        *ptr = val;
 }
 
-#define cmpxchg64(ptr, o, n)                                           \
-({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg((ptr), (o), (n));                                       \
-})
+template <typename P, typename N>
+static inline P cmpxchg64(P *ptr, P old, N rep)
+{
+       P ret = old;
+       if (sizeof(P) != 8)
+               __cmpxchg_wrong_size();
+       return cmpxchg(ptr, ret, rep);
+}
 
-#define cmpxchg64_local(ptr, o, n)                                     \
-({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg_local((ptr), (o), (n));                                 \
-})
+template <typename P, typename N>
+static inline P cmpxchg64_local(P *ptr, P old, N rep)
+{
+       P ret = old;
+       if (sizeof(P) != 8)
+               __cmpxchg_wrong_size();
+       return cmpxchg_local(ptr, ret, rep);
+}
 
 #define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
 

Reply via email to