__xchg will be used for a non-atomic xchg macro, so rename arm64's __xchg helpers to __arch_xchg to free up the name.

Signed-off-by: Andrzej Hajda <andrzej.ha...@intel.com>
---
 arch/arm64/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
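
For context, a rough sketch of the kind of non-atomic xchg macro the
bare __xchg name is being freed up for (hypothetical; the generic
definition is not part of this patch):

/*
 * Non-atomic exchange: store @val into *@ptr and return the previous
 * value.  No atomicity or memory ordering is implied; callers must
 * serialise access themselves.  Sketch only.
 */
#define __xchg(ptr, val) ({					\
	__typeof__(ptr) __p = (ptr);				\
	__typeof__(*__p) __old = *__p;				\
	*__p = (val);						\
	__old;							\
})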

diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 497acf134d9923..3a36ba58e8c2ef 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -62,7 +62,7 @@ __XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx)                                                        \
-static __always_inline  unsigned long __xchg##sfx(unsigned long x,     \
+static __always_inline  unsigned long __arch_xchg##sfx(unsigned long x,        \
                                        volatile void *ptr,             \
                                        int size)                       \
 {                                                                      \
@@ -93,7 +93,7 @@ __XCHG_GEN(_mb)
 ({                                                                     \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
-               __xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+               __arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
        __ret;                                                          \
 })
 
-- 
2.34.1
