From: John David Anglin <dave.ang...@bell.net>

commit e96ebd589debd9a6a793608c4ec7019c38785dea upstream.

This patch implements the __smp_store_release and __smp_load_acquire barriers
using ordered stores and loads.  This avoids the sync instruction present in
the generic implementation.
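
As a usage sketch only (hypothetical names, not taken from the patch): a
producer/consumer pair showing the ordering these primitives provide once
the generic smp_store_release()/smp_load_acquire() wrappers from
asm-generic/barrier.h resolve to them; msg, ready, producer and consumer
are made up for the example:

	/* kernel context: smp_store_release()/smp_load_acquire() come from
	 * <asm-generic/barrier.h>, pulled in via <asm/barrier.h> */
	struct msg {
		int payload;
	};

	static struct msg m;
	static int ready;

	static void producer(void)
	{
		m.payload = 42;			/* plain store */
		/* release: the payload store is visible before ready is set */
		smp_store_release(&ready, 1);
	}

	static int consumer(void)
	{
		/* acquire: if ready is observed as 1, the payload store is too */
		if (smp_load_acquire(&ready))
			return m.payload;
		return -1;
	}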

Cc: <sta...@vger.kernel.org> # 4.14+
Signed-off-by: Dave Anglin <dave.ang...@bell.net>
Signed-off-by: Helge Deller <del...@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>

---
 arch/parisc/include/asm/barrier.h |   61 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -26,6 +26,67 @@
 #define __smp_rmb()    mb()
 #define __smp_wmb()    mb()
 
+#define __smp_store_release(p, v)                                      \
+do {                                                                   \
+       typeof(p) __p = (p);                                            \
+        union { typeof(*p) __val; char __c[1]; } __u =                 \
+                { .__val = (__force typeof(*p)) (v) };                 \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("stb,ma %0,0(%1)"                          \
+                               : : "r"(*(__u8 *)__u.__c), "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("sth,ma %0,0(%1)"                          \
+                               : : "r"(*(__u16 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("stw,ma %0,0(%1)"                          \
+                               : : "r"(*(__u32 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("std,ma %0,0(%1)"                  \
+                               : : "r"(*(__u64 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+} while (0)
+
+#define __smp_load_acquire(p)                                          \
+({                                                                     \
+       union { typeof(*p) __val; char __c[1]; } __u;                   \
+       typeof(p) __p = (p);                                            \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("ldb,ma 0(%1),%0"                          \
+                               : "=r"(*(__u8 *)__u.__c) : "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("ldh,ma 0(%1),%0"                          \
+                               : "=r"(*(__u16 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("ldw,ma 0(%1),%0"                          \
+                               : "=r"(*(__u32 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("ldd,ma 0(%1),%0"                  \
+                               : "=r"(*(__u64 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+       __u.__val;                                                      \
+})
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */