Annotate the memory barriers *mb() with calls to kcsan_mb(), signalling to
compilers supporting KCSAN that the respective memory barrier has been
issued. Rename the memory barriers *mb() to __*mb() so that
asm-generic/barrier.h generates the corresponding *mb() macros.
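
For reference, once an arch provides __*mb(), asm-generic/barrier.h builds the
instrumented macros along the lines of the following (a sketch of the generic
pattern, shown here only for illustration; it is not code added by this patch):

    /* asm-generic/barrier.h: wrap the arch barrier with a KCSAN annotation. */
    #ifdef __mb
    #define mb()	do { kcsan_mb(); __mb(); } while (0)
    #endif

    #ifdef __rmb
    #define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
    #endif

The same pattern covers wmb() and the dma_*mb() variants, which is why the
powerpc definitions below are renamed rather than annotated directly.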

Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
---
 arch/powerpc/include/asm/barrier.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index e80b2c0e9315..f51f4be5fa4e 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -35,9 +35,9 @@
  * However, on CPUs that don't support lwsync, lwsync actually maps to a
  * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
  */
-#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define __mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define __rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define __wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
 /* The sub-arch has lwsync */
 #if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC)
@@ -51,8 +51,8 @@
 /* clang defines this macro for a builtin, which will not work with runtime patching */
 #undef __lwsync
 #define __lwsync()     __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
-#define dma_rmb()      __lwsync()
-#define dma_wmb()      __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+#define __dma_rmb()    __lwsync()
+#define __dma_wmb()    __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
 #define __smp_lwsync() __lwsync()
 
-- 
2.37.2
