__xchg will be used for a non-atomic xchg() macro, so free up the name by renaming the parisc helper to __arch_xchg.
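
For context, a minimal sketch of what such a non-atomic xchg() helper could look like (illustrative only; the helper shape and the use of GCC's __auto_type and statement-expression extensions are assumptions, not part of this patch):

	/* Non-atomic exchange: store val at *ptr, return the old value.
	 * No memory ordering or atomicity is implied; callers must hold
	 * any lock that protects *ptr themselves.
	 */
	#define __xchg(ptr, val) ({			\
		__auto_type __ptr = (ptr);		\
		__auto_type __old = *__ptr;		\
		*__ptr = (val);				\
		__old;					\
	})

	/* hypothetical usage: swap in a new pointer, get the old one back */
	old = __xchg(&slot, new);

Since the generic name would then no longer denote an atomic operation, per-arch atomic helpers like this one need the unambiguous __arch_xchg name.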

Signed-off-by: Andrzej Hajda <andrzej.ha...@intel.com>
---
 arch/parisc/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 5f274be105671e..c1d776bb16b4ed 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -22,7 +22,7 @@ extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
 
 /* optimizer better get rid of switch since size is a constant */
 static inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
        switch (size) {
 #ifdef CONFIG_64BIT
@@ -49,7 +49,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        __ret = (__typeof__(*(ptr)))                                    \
-               __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));      \
+               __arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
        __ret;                                                          \
 })
 
-- 
2.34.1
