__xchg will be used for a generic non-atomic xchg macro, so rename the LoongArch helper to __arch_xchg.
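
For reference, a generic non-atomic xchg macro could look roughly like
this (an illustrative sketch only, not the actual code added elsewhere
in this series):

	/*
	 * Hypothetical non-atomic exchange: store the new value and
	 * return the old one, with no atomicity or ordering guarantees;
	 * the caller must provide any locking it needs.
	 */
	#define __xchg(ptr, val) ({			\
		__auto_type __ptr = (ptr);		\
		__auto_type __old = *__ptr;		\
		*__ptr = (val);				\
		__old;					\
	})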

Signed-off-by: Andrzej Hajda <andrzej.ha...@intel.com>
---
 arch/loongarch/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index ecfa6cf79806e6..979fde61bba8a4 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -62,7 +62,7 @@ static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
 }
 
 static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__arch_xchg(volatile void *ptr, unsigned long x, int size)
 {
        switch (size) {
        case 1:
@@ -87,7 +87,7 @@ __xchg(volatile void *ptr, unsigned long x, int size)
        __typeof__(*(ptr)) __res;                                       \
                                                                        \
        __res = (__typeof__(*(ptr)))                                    \
-               __xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));      \
+               __arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
                                                                        \
        __res;                                                          \
 })
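
Callers are not affected by the rename, since they go through the
xchg()/arch_xchg() wrapper shown above rather than the internal
helper, e.g. (illustrative only, the variable names are made up):

	unsigned long old = xchg(&p->state, NEW_STATE);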
-- 
2.34.1
