Commit-ID: fbfcd0199170984bd3c2812e49ed0fe7b226959a
Gitweb: https://git.kernel.org/tip/fbfcd0199170984bd3c2812e49ed0fe7b226959a
Author: Andrea Parri
AuthorDate: Tue, 27 Feb 2018 05:00:58 +0100
Committer: Ingo Molnar
CommitDate: Mon, 12 Mar 2018 10:59:03 +0100
locking/xchg/alpha: Remove superfluous memory barriers from the _local() variants
The following two commits:
  79d442461df74 ("locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB")
  472e8c55cf662 ("locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs")
... ended up adding unnecessary barriers to the _local() variants on Alpha,
which the previous code took care to avoid.
Fix them by adding the smp_mb() calls to the xchg() and cmpxchg() macros rather
than to the underlying ____xchg()/____cmpxchg() variants, so that the _local()
versions no longer pay for barriers they do not need.
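
As a rough illustration of the resulting structure, here is a minimal
user-space sketch (illustration only, not the kernel code: my_cmpxchg(),
my_cmpxchg_local() and cmpxchg_helper() are made-up stand-ins for cmpxchg(),
cmpxchg_local() and ____cmpxchg(), smp_mb() is reduced to a plain compiler
barrier, and the helper's atomicity is elided): the fully ordered wrapper
takes the leading and trailing barriers, while the _local()-style wrapper
calls the barrier-free helper directly.

  /*
   * Sketch only: stand-in names, no real atomicity (the Alpha code uses
   * ldq_l/stq_c loops), and smp_mb() is just a compiler barrier here.
   */
  #include <stdio.h>

  #define smp_mb() __asm__ __volatile__("" ::: "memory")

  /* Barrier-free helper: does only the compare-and-exchange itself. */
  static inline unsigned long cmpxchg_helper(unsigned long *p,
                                             unsigned long old_v,
                                             unsigned long new_v)
  {
          unsigned long prev = *p;

          if (prev == old_v)
                  *p = new_v;
          return prev;
  }

  /* Fully ordered wrapper: leading and trailing barriers around the helper. */
  static inline unsigned long my_cmpxchg(unsigned long *p,
                                         unsigned long old_v,
                                         unsigned long new_v)
  {
          unsigned long ret;

          smp_mb();
          ret = cmpxchg_helper(p, old_v, new_v);
          smp_mb();
          return ret;
  }

  /* _local()-style wrapper: no barriers, just the exchange. */
  static inline unsigned long my_cmpxchg_local(unsigned long *p,
                                               unsigned long old_v,
                                               unsigned long new_v)
  {
          return cmpxchg_helper(p, old_v, new_v);
  }

  int main(void)
  {
          unsigned long v = 1, old;

          old = my_cmpxchg(&v, 1, 2);
          printf("cmpxchg:       old=%lu, now=%lu\n", old, v);
          old = my_cmpxchg_local(&v, 2, 3);
          printf("cmpxchg_local: old=%lu, now=%lu\n", old, v);
          return 0;
  }
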
Reported-by: Will Deacon
Signed-off-by: Andrea Parri
Cc: Alan Stern
Cc: Andrew Morton
Cc: Ivan Kokshaysky
Cc: Linus Torvalds
Cc: Matt Turner
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Richard Henderson
Cc: Thomas Gleixner
Cc: linux-al...@vger.kernel.org
Fixes: 472e8c55cf662 ("locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs")
Fixes: 79d442461df74 ("locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB")
Link: http://lkml.kernel.org/r/1519704058-13430-1-git-send-email-parri.and...@gmail.com
Signed-off-by: Ingo Molnar
---
arch/alpha/include/asm/cmpxchg.h | 20 ++++++++++++++++----
arch/alpha/include/asm/xchg.h    | 27 ---------------------------
2 files changed, 16 insertions(+), 31 deletions(-)
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 8a2b331e43fe..6c7c39452471 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -38,19 +38,31 @@
#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
#include <asm/xchg.h>
+/*
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ */
#define xchg(ptr, x) \
({ \
+ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret; \
})
#define cmpxchg(ptr, o, n) \
({ \
+ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ (unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr))); \
+ smp_mb(); \
+ __ret; \
})
#define cmpxchg64(ptr, o, n) \
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index e2b59fac5257..7adb80c6746a 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,10 +12,6 @@
* Atomic exchange.
* Since it can be used to implement critical sections
* it must clobber "memory" (also for interrupts in UP).
- *
- * The leading and the trailing memory barriers guarantee that these
- * operations are fully ordered.
- *
*/
static inline unsigned long
@@ -23,7 +19,6 @@ ____xchg(_u8, volatile char *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" insbl %1,%4,%1\n"
@@ -38,7 +33,6 @@ ____xchg(_u8, volatile char *m, unsigned long val)
".previous"
: "=" (ret), "=" (val), "=" (tmp), "=" (addr64)
: "r" ((long)m), "1" (val) : "memory");
- smp_mb();
return ret;
}
@@ -48,7 +42,6 @@ ____xchg(_u16, volatile short *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
- smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" inswl %1,%4,%1\n"
@@ -63,7 +56,6 @@ ____xchg(_u16,