KCSAN instruments calls to atomic builtins and, in turn, calls these
builtins itself from its runtime. As such, architectures supporting KCSAN
must have compiler support for these atomic primitives.
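
For illustration only (a sketch, not code added by this patch): the KCSAN
runtime wrappers in kernel/kcsan/core.c have roughly the following shape,
and the builtin call at the end is what may be lowered to an out-of-line
call such as __atomic_load_8() on 32-bit targets without native 64-bit
atomics:

    /* Sketch of a KCSAN runtime wrapper; race checking elided. */
    u64 __tsan_atomic64_load(const u64 *ptr, int memorder)
    {
            return __atomic_load_n(ptr, memorder);
    }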

Since 32-bit systems are unlikely to have compiler support for the 64-bit
atomic builtins, provide a stub for each missing builtin and use BUG() to
assert that it is never reached.

As of commit 725aea873261 ("xtensa: enable KCSAN"), xtensa implements these
stubs locally, but does not advertise the fact with preprocessor macros that
generic code could test. To avoid emitting duplicate symbols, do not build
the stubs on xtensa. A future patch will remove the xtensa implementation of
these stubs.
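
A hypothetical alternative, sketched below purely for illustration (the
ARCH_HAS_KCSAN_ATOMIC_STUBS macro is invented here and does not exist),
would be for the architecture to advertise its local implementation so that
the generic stubs could compile themselves out:

    /* Hypothetical guard; no such macro is defined today. */
    #ifndef ARCH_HAS_KCSAN_ATOMIC_STUBS
    u64 __atomic_load_8(const volatile void *p, int i)
    {
            BUG();
    }
    /* ... remaining stubs ... */
    #endif

Absent such a macro, exclude the file in the Makefile instead.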

Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
---
v4: New patch
---
 kernel/kcsan/Makefile |  3 ++
 kernel/kcsan/stubs.c  | 78 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 kernel/kcsan/stubs.c

diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
index 8cf70f068d92..5dfc5825aae9 100644
--- a/kernel/kcsan/Makefile
+++ b/kernel/kcsan/Makefile
@@ -12,6 +12,9 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
        -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 obj-y := core.o debugfs.o report.o
+ifndef CONFIG_XTENSA
+obj-y += stubs.o
+endif
 
 KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
diff --git a/kernel/kcsan/stubs.c b/kernel/kcsan/stubs.c
new file mode 100644
index 000000000000..ec5cd99be422
--- /dev/null
+++ b/kernel/kcsan/stubs.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+#ifndef CONFIG_64BIT
+
+#if !__has_builtin(__atomic_store_8)
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_load_8)
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_exchange_8)
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_compare_exchange_8)
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_add_8)
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_sub_8)
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_and_8)
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_or_8)
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_xor_8)
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#if !__has_builtin(__atomic_fetch_nand_8)
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+#endif
+
+#endif /* !CONFIG_64BIT */
-- 
2.37.2
