On Thu, May 21, 2020 at 04:20:41PM +0200, Marco Elver wrote:
> Some compilers incorrectly inline small __no_kcsan functions, which then
> results in instrumenting the accesses. For this reason, the 'noinline'
> attribute was added to __no_kcsan_or_inline. All known versions of GCC
> are affected by this. Supported versions of Clang are unaffected and
> never inline a no_sanitize function.
> 
> However, the attribute 'noinline' in __no_kcsan_or_inline causes
> unexpected code generation in functions that are __no_kcsan and call a
> __no_kcsan_or_inline function.
> 
> In certain situations it is expected that the __no_kcsan_or_inline
> function is actually inlined by the __no_kcsan function, and *no* calls
> are emitted. By removing the 'noinline' attribute we give the compiler
> the ability to inline and generate the expected code in __no_kcsan
> functions.

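For illustration (hypothetical helper names, not from the patch), the
pattern described above is roughly:

	/* Small accessor that must stay uninstrumented. */
	static __no_kcsan_or_inline unsigned long read_flag(const volatile unsigned long *p)
	{
		return *p;	/* plain access */
	}

	static __no_kcsan bool flag_is_set(const volatile unsigned long *p)
	{
		/*
		 * With 'noinline' on __no_kcsan_or_inline a call to
		 * read_flag() is emitted here even though inlining it would
		 * be safe and expected; without 'noinline' the compiler is
		 * free to inline it and emit no call at all.
		 */
		return read_flag(p) != 0;
	}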

Doesn't this mean we can do the below?

---
 Documentation/dev-tools/kcsan.rst |  6 ------
 arch/x86/include/asm/bitops.h     |  6 +-----
 include/linux/compiler_types.h    | 14 ++++----------
 kernel/kcsan/kcsan-test.c         |  4 ++--
 4 files changed, 7 insertions(+), 23 deletions(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index ce4bbd918648..b38379f06194 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -114,12 +114,6 @@ functions, compilation units, or entire subsystems.  For static blacklisting,
   To dynamically limit for which functions to generate reports, see the
   `DebugFS interface`_ blacklist/whitelist feature.
 
-  For ``__always_inline`` functions, replace ``__always_inline`` with
-  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
-
-    static __no_kcsan_or_inline void foo(void) {
-        ...
-
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
 
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 35460fef39b8..0367efdc5b7a 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
 	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
-static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
-       /*
-        * Because this is a plain access, we need to disable KCSAN here to
-        * avoid double instrumentation via instrumented bitops.
-        */
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4e4982d6f3b0..6a2c0f857ac3 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -118,10 +118,6 @@ struct ftrace_likely_data {
 #define notrace			__attribute__((__no_instrument_function__))
 #endif
 
-/* Section for code which can't be instrumented at all */
-#define noinstr								\
-	noinline notrace __attribute((__section__(".noinstr.text")))
-
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
@@ -192,17 +188,15 @@ struct ftrace_likely_data {
 #endif
 
 #define __no_kcsan __no_sanitize_thread
-#ifdef __SANITIZE_THREAD__
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
-#endif
 
 #ifndef __no_sanitize_or_inline
 #define __no_sanitize_or_inline __always_inline
 #endif
 
+/* Section for code which can't be instrumented at all */
+#define noinstr								\
+	noinline notrace __attribute((__section__(".noinstr.text"))) __no_kcsan
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
index a8c11506dd2a..374263ddffe2 100644
--- a/kernel/kcsan/kcsan-test.c
+++ b/kernel/kcsan/kcsan-test.c
@@ -43,7 +43,7 @@ static struct {
 };
 
 /* Setup test checking loop. */
-static __no_kcsan_or_inline void
+static __no_kcsan inline void
 begin_test_checks(void (*func1)(void), void (*func2)(void))
 {
        kcsan_disable_current();
@@ -60,7 +60,7 @@ begin_test_checks(void (*func1)(void), void (*func2)(void))
 }
 
 /* End test checking loop. */
-static __no_kcsan_or_inline bool
+static __no_kcsan inline bool
 end_test_checks(bool stop)
 {
        if (!stop && time_before(jiffies, end_time)) {

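That is, with noinstr implying __no_kcsan, a (hypothetical) helper like the
one sketched above would no longer need a special annotation when all its
callers are themselves __no_kcsan/noinstr:

	static __always_inline unsigned long read_flag(const volatile unsigned long *p)
	{
		return *p;	/* inlined into an uninstrumented caller */
	}

	static __no_kcsan bool flag_is_set(const volatile unsigned long *p)
	{
		return read_flag(p) != 0;	/* no call, no instrumentation */
	}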