Commit-ID:  79d442461df7478cdd0c50d9b8a76f431f150fa3
Gitweb:     https://git.kernel.org/tip/79d442461df7478cdd0c50d9b8a76f431f150fa3
Author:     Andrea Parri <parri.and...@gmail.com>
AuthorDate: Thu, 22 Feb 2018 10:24:29 +0100
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 23 Feb 2018 08:38:15 +0100

locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB

Replace each occurrence of __ASM__MB with a (trailing) smp_mb() in
xchg() and cmpxchg(), and remove the now-unused __ASM__MB definitions;
this improves readability, with no additional synchronization cost.
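
For illustration, the shape of the change in each routine is sketched
below (a simplified sketch, not the exact kernel source: the retry
branch here loops straight back to 1b rather than through a
.subsection, and the my_xchg_u64_*() names are hypothetical). Under
CONFIG_SMP, smp_mb() emits the same "mb" instruction that __ASM__MB
spliced into the asm body, so the generated code is unchanged; on
!SMP builds smp_mb() reduces to barrier(), a compiler-only barrier
that the asm's "memory" clobber already implies.

    /*
     * Old pattern: barrier spliced into the asm string via __ASM__MB,
     * which was "\tmb\n" under CONFIG_SMP and empty otherwise.
     */
    static inline unsigned long my_xchg_u64_old(volatile long *m, unsigned long val)
    {
            unsigned long dummy;

            __asm__ __volatile__(
            "1:     ldq_l %0,%2\n"          /* load-locked */
            "       bis $31,%3,%1\n"        /* copy the new value */
            "       stq_c %1,%2\n"          /* store-conditional */
            "       beq %1,1b\n"            /* retry if the reservation was lost */
                    __ASM__MB               /* trailing "mb" on SMP builds */
            : "=&r" (val), "=&r" (dummy), "=m" (*m)
            : "rI" (val), "m" (*m) : "memory");

            return val;
    }

    /*
     * New pattern: identical asm body; the barrier is a C-level
     * smp_mb() placed immediately after it.
     */
    static inline unsigned long my_xchg_u64_new(volatile long *m, unsigned long val)
    {
            unsigned long dummy;

            __asm__ __volatile__(
            "1:     ldq_l %0,%2\n"
            "       bis $31,%3,%1\n"
            "       stq_c %1,%2\n"
            "       beq %1,1b\n"
            : "=&r" (val), "=&r" (dummy), "=m" (*m)
            : "rI" (val), "m" (*m) : "memory");
            smp_mb();                       /* "mb" on SMP, barrier() otherwise */

            return val;
    }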

Suggested-by: Will Deacon <will.dea...@arm.com>
Signed-off-by: Andrea Parri <parri.and...@gmail.com>
Acked-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Alan Stern <st...@rowland.harvard.edu>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Ivan Kokshaysky <i...@jurassic.park.msu.ru>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Matt Turner <matts...@gmail.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Richard Henderson <r...@twiddle.net>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: linux-al...@vger.kernel.org
Link: http://lkml.kernel.org/r/1519291469-5702-1-git-send-email-parri.and...@gmail.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/alpha/include/asm/cmpxchg.h |  6 ------
 arch/alpha/include/asm/xchg.h    | 16 ++++++++--------
 2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)                __xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)     __cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB      "\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)                __xchg ##type(args)
@@ -64,7 +59,6 @@
        cmpxchg((ptr), (o), (n));                                       \
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index e2660866ce97..e1facf6fc244 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -28,12 +28,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -52,12 +52,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -72,12 +72,12 @@ ____xchg(_u32, volatile int *m, unsigned long val)
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -92,12 +92,12 @@ ____xchg(_u64, volatile long *m, unsigned long val)
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -150,12 +150,12 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
-               __ASM__MB
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -177,12 +177,12 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
-               __ASM__MB
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -200,12 +200,12 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
-               __ASM__MB
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -223,12 +223,12 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
-               __ASM__MB
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
