On Fri, Jul 3, 2020 at 3:40 PM Marco Elver <[email protected]> wrote:
>
> Some architectures (currently e.g. s390 partially) implement atomics
> using the compiler's atomic builtins (__atomic_*, __sync_*). To support
> enabling KCSAN on such architectures in future, or support experimental
> use of these builtins, implement support for them.
>
> We should also avoid breaking KCSAN kernels due to use (accidental or
> otherwise) of atomic builtins in drivers, as has happened in the past:
> https://lkml.kernel.org/r/[email protected]
>
> The instrumentation is subtly different from regular reads/writes: TSAN
> instrumentation replaces the use of atomic builtins with a call into the
> runtime, and the runtime's job is to also execute the desired atomic
> operation. We rely on the __atomic_* compiler builtins, available with
> all KCSAN-supported compilers, to implement each TSAN atomic
> instrumentation function.
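
To illustrate the mechanism for readers less familiar with the TSAN
instrumentation (a rough sketch, not code from this patch): with
-fsanitize=thread, the compiler replaces each atomic builtin with a call to
the matching runtime hook. For example, assuming a 32-bit variable x:

	/* What the code says: */
	val = __atomic_load_n(&x, __ATOMIC_ACQUIRE);

	/* What the instrumented object code effectively calls instead: */
	val = __tsan_atomic32_load(&x, __ATOMIC_ACQUIRE);

This is why each __tsan_atomic*() function below has to both call
check_access() and perform the access itself via the corresponding
__atomic_*() builtin.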
>
> Signed-off-by: Marco Elver <[email protected]>

Reviewed-by: Dmitry Vyukov <[email protected]>

> ---
>  kernel/kcsan/core.c | 110 ++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 110 insertions(+)
>
> diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
> index d803765603fb..6843169da759 100644
> --- a/kernel/kcsan/core.c
> +++ b/kernel/kcsan/core.c
> @@ -856,3 +856,113 @@ void __tsan_init(void)
>  {
>  }
>  EXPORT_SYMBOL(__tsan_init);
> +
> +/*
> + * Instrumentation for atomic builtins (__atomic_*, __sync_*).
> + *
> + * Normal kernel code _should not_ be using them directly, but some
> + * architectures may implement some or all atomics using the compilers'
> + * builtins.
> + *
> + * Note: If an architecture decides to fully implement atomics using the
> + * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
> + * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
> + * atomic-instrumented) is no longer necessary.
> + *
> + * TSAN instrumentation replaces atomic accesses with calls to any of the below
> + * functions, whose job is to also execute the operation itself.
> + */
> +
> +#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                                       \
> +        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);                     \
> +        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)                      \
> +        {                                                                                          \
> +                check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);                     \
> +                return __atomic_load_n(ptr, memorder);                                            \
> +        }                                                                                          \
> +        EXPORT_SYMBOL(__tsan_atomic##bits##_load);                                                 \
> +        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder);                  \
> +        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder)                   \
> +        {                                                                                          \
> +                check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
> +                __atomic_store_n(ptr, v, memorder);                                               \
> +        }                                                                                          \
> +        EXPORT_SYMBOL(__tsan_atomic##bits##_store)
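
(For reference, DEFINE_TSAN_ATOMIC_LOAD_STORE(32) expands to roughly the
following; the redundant declarations and EXPORT_SYMBOL() lines are omitted,
and 32 bits is chosen only as an example:)

	u32 __tsan_atomic32_load(const u32 *ptr, int memorder)
	{
		check_access(ptr, 32 / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);
		return __atomic_load_n(ptr, memorder);
	}

	void __tsan_atomic32_store(u32 *ptr, u32 v, int memorder)
	{
		check_access(ptr, 32 / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC);
		__atomic_store_n(ptr, v, memorder);
	}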
> +
> +#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                                                  \
> +        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder);                \
> +        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder)                 \
> +        {                                                                                          \
> +                check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
> +                return __atomic_##op##suffix(ptr, v, memorder);                                    \
> +        }                                                                                          \
> +        EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
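
(Similarly, e.g. DEFINE_TSAN_ATOMIC_RMW(fetch_add, 32, ) boils down to:)

	u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder)
	{
		check_access(ptr, 32 / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC);
		return __atomic_fetch_add(ptr, v, memorder);
	}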
> +
> +/*
> + * Note: CAS operations are always classified as write, even in case they
> + * fail. We cannot perform check_access() after a write, as it might lead to
> + * false positives, in cases such as:
> + *
> + *     T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
> + *
> + *     T1: if (__atomic_load_n(&p->flag, ...)) {
> + *             modify *p;
> + *             p->flag = 0;
> + *         }
> + *
> + * The only downside is that, if there are 3 threads, with one CAS that
> + * succeeds, another CAS that fails, and an unmarked racing operation, we may
> + * point at the wrong CAS as the source of the race. However, if we assume that
> + * all CAS can succeed in some other execution, the data race is still valid.
> + */
> +#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                                          \
> +        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,         \
> +                                                              u##bits val, int mo, int fail_mo);  \
> +        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,         \
> +                                                              u##bits val, int mo, int fail_mo)   \
> +        {                                                                                          \
> +                check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
> +                return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo);             \
> +        }                                                                                          \
> +        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
> +
> +#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                                      \
> +        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
> +                                                           int mo, int fail_mo);                  \
> +        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
> +                                                           int mo, int fail_mo)                   \
> +        {                                                                                          \
> +                check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
> +                __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);                      \
> +                return exp;                                                                        \
> +        }                                                                                          \
> +        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
> +
> +#define DEFINE_TSAN_ATOMIC_OPS(bits)                                                              \
> +        DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                                       \
> +        DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                                                \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                                                 \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                                                 \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                                                 \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                                                  \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                                                 \
> +        DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                                                \
> +        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                                               \
> +        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                                                 \
> +        DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
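
(A note on the suffix and weak parameters, in case it is not obvious: the
suffix exists because the value-based exchange builtin is spelled
__atomic_exchange_n(), whereas the fetch_*() builtins have no _n variant, so
the RMW lines above end up calling e.g.

	__atomic_exchange_n(ptr, v, memorder);	/* suffix "_n" */
	__atomic_fetch_add(ptr, v, memorder);	/* empty suffix */

and the trailing 0/1 in the CMPXCHG lines is simply the constant "weak"
argument passed through to __atomic_compare_exchange_n().)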
> +
> +DEFINE_TSAN_ATOMIC_OPS(8);
> +DEFINE_TSAN_ATOMIC_OPS(16);
> +DEFINE_TSAN_ATOMIC_OPS(32);
> +DEFINE_TSAN_ATOMIC_OPS(64);
> +
> +void __tsan_atomic_thread_fence(int memorder);
> +void __tsan_atomic_thread_fence(int memorder)
> +{
> +       __atomic_thread_fence(memorder);
> +}
> +EXPORT_SYMBOL(__tsan_atomic_thread_fence);
> +
> +void __tsan_atomic_signal_fence(int memorder);
> +void __tsan_atomic_signal_fence(int memorder) { }
> +EXPORT_SYMBOL(__tsan_atomic_signal_fence);
> --
> 2.27.0.212.ge8ba1cc988-goog
>
