On Fri, 29 May 2020 at 19:08, Peter Zijlstra <pet...@infradead.org> wrote:
[...]
>
> Doesn't this mean we can do the below?
If nobody complains about the lack of __no_kcsan_or_inline, let's do
it. See comments below.

> ---
>  Documentation/dev-tools/kcsan.rst |  6 ------
>  arch/x86/include/asm/bitops.h     |  6 +-----
>  include/linux/compiler_types.h    | 14 ++++----------
>  kernel/kcsan/kcsan-test.c         |  4 ++--
>  4 files changed, 7 insertions(+), 23 deletions(-)
>
> diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
> index ce4bbd918648..b38379f06194 100644
> --- a/Documentation/dev-tools/kcsan.rst
> +++ b/Documentation/dev-tools/kcsan.rst
> @@ -114,12 +114,6 @@ functions, compilation units, or entire subsystems. For static blacklisting,
>    To dynamically limit for which functions to generate reports, see the
>    `DebugFS interface`_ blacklist/whitelist feature.
>
> -  For ``__always_inline`` functions, replace ``__always_inline`` with
> -  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
> -
> -    static __no_kcsan_or_inline void foo(void) {
> -        ...
> -
>  * To disable data race detection for a particular compilation unit, add to the
>    ``Makefile``::

I suppose, if we say that __no_kcsan_or_inline should just disappear
because '__no_kcsan inline' is now good enough, we can delete it. I
think functions that absolutely must be __always_inline would break
with __no_kcsan_or_inline under KCSAN anyway. So, let's simplify.

> diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
> index 35460fef39b8..0367efdc5b7a 100644
> --- a/arch/x86/include/asm/bitops.h
> +++ b/arch/x86/include/asm/bitops.h
> @@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
>  	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
>  }
>
> -static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
> +static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
>  {
> -	/*
> -	 * Because this is a plain access, we need to disable KCSAN here to
> -	 * avoid double instrumentation via instrumented bitops.
> -	 */

Yes, we should have reverted this eventually.

>  	return ((1UL << (nr & (BITS_PER_LONG-1))) &
>  		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
>  }
> diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
> index 4e4982d6f3b0..6a2c0f857ac3 100644
> --- a/include/linux/compiler_types.h
> +++ b/include/linux/compiler_types.h
> @@ -118,10 +118,6 @@ struct ftrace_likely_data {
>  #define notrace			__attribute__((__no_instrument_function__))
>  #endif
>
> -/* Section for code which can't be instrumented at all */
> -#define noinstr							\
> -	noinline notrace __attribute((__section__(".noinstr.text")))
> -
>  /*
>   * it doesn't make sense on ARM (currently the only user of __naked)
>   * to trace naked functions because then mcount is called without
> @@ -192,17 +188,15 @@ struct ftrace_likely_data {
>  #endif
>
>  #define __no_kcsan __no_sanitize_thread
> -#ifdef __SANITIZE_THREAD__
> -# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
> -# define __no_sanitize_or_inline __no_kcsan_or_inline

I think we just want to keep __no_sanitize_or_inline, for
READ_ONCE_NOCHECK. Having READ_ONCE_NOCHECK do KCSAN-checking seems
wrong, and I don't know what might break.
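(For context, this is roughly the pattern READ_ONCE_NOCHECK depends on --
a simplified sketch only, the helper name and body approximate what is
actually in the tree:

  /* Must stay uninstrumented under sanitizers, but still inline otherwise. */
  static __no_sanitize_or_inline
  unsigned long __read_once_word_nocheck(const void *addr)
  {
          /* volatile load; the annotation suppresses KCSAN/KASAN instrumentation */
          return *(const volatile unsigned long *)addr;
  }

If __no_sanitize_or_inline disappeared along with __no_kcsan_or_inline,
a load like this would quietly become instrumented again.)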
> -#else
> -# define __no_kcsan_or_inline __always_inline
> -#endif
>
>  #ifndef __no_sanitize_or_inline
>  #define __no_sanitize_or_inline __always_inline
>  #endif
>
> +/* Section for code which can't be instrumented at all */
> +#define noinstr							\
> +	noinline notrace __attribute((__section__(".noinstr.text"))) __no_kcsan
> +

Will this eventually need __no_sanitize_address?

>  #endif /* __KERNEL__ */
>
>  #endif /* __ASSEMBLY__ */
> diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
> index a8c11506dd2a..374263ddffe2 100644
> --- a/kernel/kcsan/kcsan-test.c
> +++ b/kernel/kcsan/kcsan-test.c
> @@ -43,7 +43,7 @@ static struct {
>  };
>
>  /* Setup test checking loop. */
> -static __no_kcsan_or_inline void
> +static __no_kcsan inline void
>  begin_test_checks(void (*func1)(void), void (*func2)(void))
>  {
>  	kcsan_disable_current();
> @@ -60,7 +60,7 @@ begin_test_checks(void (*func1)(void), void (*func2)(void))
>  }
>
>  /* End test checking loop. */
> -static __no_kcsan_or_inline bool
> +static __no_kcsan inline bool
>  end_test_checks(bool stop)
>  {
>  	if (!stop && time_before(jiffies, end_time)) {

Acked -- if you send a patch, do split the test-related change, so
that Paul can apply it to the test which is currently only in -rcu.

Thanks,
-- Marco