[tip: perf/core] perf: Apply PERF_EVENT_IOC_MODIFY_ATTRIBUTES to children

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     47f661eca0700928012e11c57ea0328f5ccfc3b9
Gitweb:        https://git.kernel.org/tip/47f661eca0700928012e11c57ea0328f5ccfc3b9
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:35:57 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:40 +02:00

perf: Apply PERF_EVENT_IOC_MODIFY_ATTRIBUTES to children

As with other ioctls (such as PERF_EVENT_IOC_{ENABLE,DISABLE}), fix up
handling of PERF_EVENT_IOC_MODIFY_ATTRIBUTES to also apply to children.

Suggested-by: Dmitry Vyukov 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Dmitry Vyukov 
Link: https://lkml.kernel.org/r/20210408103605.1676875-3-el...@google.com
---
 kernel/events/core.c | 22 +-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 318ff7b..10ed2cd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3200,16 +3200,36 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
 static int perf_event_modify_attr(struct perf_event *event,
  struct perf_event_attr *attr)
 {
+   int (*func)(struct perf_event *, struct perf_event_attr *);
+   struct perf_event *child;
+   int err;
+
if (event->attr.type != attr->type)
return -EINVAL;
 
switch (event->attr.type) {
case PERF_TYPE_BREAKPOINT:
-   return perf_event_modify_breakpoint(event, attr);
+   func = perf_event_modify_breakpoint;
+   break;
default:
/* Place holder for future additions. */
return -EOPNOTSUPP;
}
+
+   WARN_ON_ONCE(event->ctx->parent_ctx);
+
+   mutex_lock(&event->child_mutex);
+   err = func(event, attr);
+   if (err)
+   goto out;
+   list_for_each_entry(child, &event->child_list, child_list) {
+   err = func(child, attr);
+   if (err)
+   goto out;
+   }
+out:
+   mutex_unlock(&event->child_mutex);
+   return err;
 }
 
 static void ctx_sched_out(struct perf_event_context *ctx,
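
For illustration, a minimal user-space sketch (not part of the patch) of how
this ioctl is typically driven; after this change, the modified attributes
also propagate to all fork()-inherited child events. The fd and address are
hypothetical.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Retarget an existing breakpoint event; assumes `fd` was returned by
 * perf_event_open() with attr.type == PERF_TYPE_BREAKPOINT. */
static int move_breakpoint(int fd, unsigned long new_addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type    = PERF_TYPE_BREAKPOINT;	/* must match the event's type */
	attr.size    = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_RW;
	attr.bp_addr = new_addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;

	/* With this commit, the change applies to children as well, matching
	 * PERF_EVENT_IOC_{ENABLE,DISABLE} semantics. */
	return ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
}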


[tip: perf/core] perf: Support only inheriting events if cloned with CLONE_THREAD

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     2b26f0aa004995f49f7b6f4100dd0e4c39a9ed5f
Gitweb:        https://git.kernel.org/tip/2b26f0aa004995f49f7b6f4100dd0e4c39a9ed5f
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:35:58 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:40 +02:00

perf: Support only inheriting events if cloned with CLONE_THREAD

Adds bit perf_event_attr::inherit_thread, which restricts inheriting
events to children cloned with CLONE_THREAD.

This option supports the case where an event is supposed to be
process-wide only (including subthreads), but should not propagate
beyond the current process's shared environment.

Suggested-by: Peter Zijlstra 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: 
https://lore.kernel.org/lkml/ybvj6ejr%2fdy2t...@hirez.programming.kicks-ass.net/
---
 include/linux/perf_event.h  |  5 +++--
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c| 21 ++---
 kernel/fork.c   |  2 +-
 4 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3d478ab..1660039 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -958,7 +958,7 @@ extern void __perf_event_task_sched_in(struct task_struct *prev,
   struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
-extern int perf_event_init_task(struct task_struct *child);
+extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
@@ -1449,7 +1449,8 @@ perf_event_task_sched_in(struct task_struct *prev,
 static inline void
 perf_event_task_sched_out(struct task_struct *prev,
  struct task_struct *next) { }
-static inline int perf_event_init_task(struct task_struct *child)  { return 0; }
+static inline int perf_event_init_task(struct task_struct *child,
+  u64 clone_flags) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task)  { }
 static inline void perf_event_delayed_put(struct task_struct *task){ }
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index ad15e40..813efb6 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -389,7 +389,8 @@ struct perf_event_attr {
cgroup :  1, /* include cgroup events */
text_poke  :  1, /* include text poke events */
build_id   :  1, /* use build id in mmap2 events */
-   __reserved_1   : 29;
+   inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
+   __reserved_1   : 28;
 
union {
__u32   wakeup_events;/* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 10ed2cd..3e3c00f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11653,6 +11653,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
(attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
return -EINVAL;
 
+   if (!attr->inherit && attr->inherit_thread)
+   return -EINVAL;
+
 out:
return ret;
 
@@ -12873,12 +12876,13 @@ static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
   struct perf_event_context *parent_ctx,
   struct task_struct *child, int ctxn,
-  int *inherited_all)
+  u64 clone_flags, int *inherited_all)
 {
int ret;
struct perf_event_context *child_ctx;
 
-   if (!event->attr.inherit) {
+   if (!event->attr.inherit ||
+   (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
*inherited_all = 0;
return 0;
}
@@ -12910,7 +12914,8 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 /*
  * Initialize the perf_event context in task_struct
  */
-static int perf_event_init_context(struct task_struct *child, int ctxn)
+static int perf_event_init_context(struct task_struct *child, int ctxn,
+  u64 clone_flags)
 {
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
@@ -12950,7 +12955,8 @@ static int 
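
For illustration, a hedged user-space sketch (not part of the patch) of the
new bit; attribute values other than inherit/inherit_thread are arbitrary.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Count instructions across all threads of the current process, without
 * following fork() into new processes. */
static int open_thread_scoped_counter(void)
{
	struct perf_event_attr attr = {
		.type           = PERF_TYPE_HARDWARE,
		.size           = sizeof(attr),
		.config         = PERF_COUNT_HW_INSTRUCTIONS,
		.inherit        = 1,	/* inherit into children... */
		.inherit_thread = 1,	/* ...but only CLONE_THREAD children */
	};

	/* Note the perf_copy_attr() check above: inherit_thread=1 without
	 * inherit=1 is rejected with EINVAL. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1,
		       PERF_FLAG_FD_CLOEXEC);
}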

[tip: perf/core] signal: Introduce TRAP_PERF si_code and si_perf to siginfo

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     fb6cc127e0b6e629252cdd0f77d5a1f49db95b92
Gitweb:        https://git.kernel.org/tip/fb6cc127e0b6e629252cdd0f77d5a1f49db95b92
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:36:00 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:41 +02:00

signal: Introduce TRAP_PERF si_code and si_perf to siginfo

Introduces the TRAP_PERF si_code and the associated siginfo_t field
si_perf. These will be used by the perf event subsystem to send signals
(if requested) to the task where an event occurred.

Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Acked-by: Geert Uytterhoeven  # m68k
Acked-by: Arnd Bergmann  # asm-generic
Link: https://lkml.kernel.org/r/20210408103605.1676875-6-el...@google.com
---
 arch/m68k/kernel/signal.c  |  3 +++
 arch/x86/kernel/signal_compat.c|  5 -
 fs/signalfd.c  |  4 
 include/linux/compat.h |  2 ++
 include/linux/signal.h |  1 +
 include/uapi/asm-generic/siginfo.h |  6 +-
 include/uapi/linux/signalfd.h  |  4 +++-
 kernel/signal.c| 11 +++
 8 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 349570f..a4b7ee1 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -622,6 +622,9 @@ static inline void siginfo_build_tests(void)
/* _sigfault._addr_pkey */
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
 
+   /* _sigfault._perf */
+   BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
+
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x10);
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index a5330ff..0e5d0a7 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -29,7 +29,7 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(NSIGFPE  != 15);
BUILD_BUG_ON(NSIGSEGV != 9);
BUILD_BUG_ON(NSIGBUS  != 5);
-   BUILD_BUG_ON(NSIGTRAP != 5);
+   BUILD_BUG_ON(NSIGTRAP != 6);
BUILD_BUG_ON(NSIGCHLD != 6);
BUILD_BUG_ON(NSIGSYS  != 2);
 
@@ -138,6 +138,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
 
+   BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
+   BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
+
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 456046e..040a114 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -134,6 +134,10 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 #endif
new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
+   case SIL_PERF_EVENT:
+   new.ssi_addr = (long) kinfo->si_addr;
+   new.ssi_perf = kinfo->si_perf;
+   break;
case SIL_CHLD:
new.ssi_pid= kinfo->si_pid;
new.ssi_uid= kinfo->si_uid;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 6e65be7..c8821d9 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -236,6 +236,8 @@ typedef struct compat_siginfo {
char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
u32 _pkey;
} _addr_pkey;
+   /* used when si_code=TRAP_PERF */
+   compat_u64 _perf;
};
} _sigfault;
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 205526c..1e98548 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -43,6 +43,7 @@ enum siginfo_layout {
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
+   SIL_PERF_EVENT,
SIL_CHLD,
SIL_RT,
SIL_SYS,
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index d259700..d0bb912 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -91,6 +91,8 @@ union __sifields {
char _dummy_pkey[__ADDR_BND_PKEY_PAD];
__u32 _pkey;
} _addr_pkey;
+   /* used when si_code=TRAP_PERF */
+   __u64 _perf;
};
} _sigfault;
 
@@ -155,6 +157,7 @@ typedef struct siginfo {
 #define si_lower   _sifields._sigfault._addr_bnd._lower
 #define si_upper   _sifields._sigfault._addr_bnd._upper
 #define si_pkey    _sifields._sigfault._addr_pkey._pkey
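
For illustration, a hedged sketch of consuming the new fields in a user-space
SIGTRAP handler; it assumes the kernel's uapi siginfo (which defines TRAP_PERF
and si_perf), as the selftests later in the series do.

#include <signal.h>
#include <stdio.h>

static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	if (info->si_code != TRAP_PERF)
		return;
	/* si_perf carries user data from the perf event (see the sig_data
	 * patch below); si_addr is the data address, if the event has one.
	 * fprintf() is not async-signal-safe; demo only. */
	fprintf(stderr, "perf event: si_perf=%llu, si_addr=%p\n",
		(unsigned long long)info->si_perf, info->si_addr);
}

/* Register with sigaction() and SA_SIGINFO so the handler sees siginfo. */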

[tip: perf/core] perf: Add support for SIGTRAP on perf events

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     97ba62b278674293762c3d91f724f1bb922f04e0
Gitweb:        https://git.kernel.org/tip/97ba62b278674293762c3d91f724f1bb922f04e0
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:36:01 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:41 +02:00

perf: Add support for SIGTRAP on perf events

Adds bit perf_event_attr::sigtrap, which can be set to cause events to
send SIGTRAP (with si_code TRAP_PERF) to the task where the event
occurred. The primary motivation is to support synchronous signals on
perf events in the task where an event (such as a breakpoint) triggered.

To distinguish perf events based on the event type, the type is set in
si_errno. For events that are associated with an address, si_addr is
copied from perf_sample_data.

The new field perf_event_attr::sig_data is copied to si_perf, which
allows user space to disambiguate which event (of the same type)
triggered the signal. For example, user space could encode the relevant
information it cares about in sig_data.

We note that the choice of an opaque u64 provides the simplest and most
flexible option. Alternatives where a reference to some user space data
is passed back suffer from the problem that modification of referenced
data (be it the event fd, or the perf_event_attr) can race with the
signal being delivered (of course, the same caveat applies if user space
decides to store a pointer in sig_data, but the ABI explicitly avoids
prescribing such a design).

Suggested-by: Peter Zijlstra 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Acked-by: Dmitry Vyukov 
Link: 
https://lore.kernel.org/lkml/ybv3rat566k+6...@hirez.programming.kicks-ass.net/
---
 include/linux/perf_event.h  |  1 +-
 include/uapi/linux/perf_event.h | 10 ++-
 kernel/events/core.c| 49 +++-
 3 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1660039..7d7280a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -735,6 +735,7 @@ struct perf_event {
int pending_wakeup;
int pending_kill;
int pending_disable;
+   unsigned long   pending_addr;   /* SIGTRAP */
struct irq_work pending;
 
atomic_tevent_limit;
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 8c5b9f5..31b00e3 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -311,6 +311,7 @@ enum perf_event_read_format {
 #define PERF_ATTR_SIZE_VER4   104 /* add: sample_regs_intr */
 #define PERF_ATTR_SIZE_VER5   112 /* add: aux_watermark */
 #define PERF_ATTR_SIZE_VER6   120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7   128 /* add: sig_data */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -391,7 +392,8 @@ struct perf_event_attr {
build_id   :  1, /* use build id in mmap2 events */
inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
remove_on_exec :  1, /* event is removed from task on exec */
-   __reserved_1   : 27;
+   sigtrap :  1, /* send synchronous SIGTRAP on event */
+   __reserved_1   : 26;
 
union {
__u32   wakeup_events;/* wakeup every n events */
@@ -443,6 +445,12 @@ struct perf_event_attr {
__u16   __reserved_2;
__u32   aux_sample_size;
__u32   __reserved_3;
+
+   /*
+* User provided data if sigtrap=1, passed back to user via
+* siginfo_t::si_perf, e.g. to permit user to identify the event.
+*/
+   __u64   sig_data;
 };
 
 /*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e4a584b..6f0723c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6392,6 +6392,33 @@ void perf_event_wakeup(struct perf_event *event)
}
 }
 
+static void perf_sigtrap(struct perf_event *event)
+{
+   struct kernel_siginfo info;
+
+   /*
+* We'd expect this to only occur if the irq_work is delayed and either
+* ctx->task or current has changed in the meantime. This can be the
+* case on architectures that do not implement arch_irq_work_raise().
+*/
+   if (WARN_ON_ONCE(event->ctx->task != current))
+   return;
+
+   /*
+* perf_pending_event() can race with the task exiting.
+*/
+   if (current->flags & PF_EXITING)
+   return;
+
+   clear_siginfo(&info);
+   info.si_signo = SIGTRAP;
+   info.si_code = TRAP_PERF;
+   
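
For illustration, a hedged sketch (values arbitrary, not part of the patch)
of requesting a synchronous SIGTRAP from a breakpoint event, with sig_data
echoed back as si_perf; the complete, tested version is the sigtrap_threads
kselftest later in the series.

static volatile int watched;	/* hypothetical watched variable */

struct perf_event_attr attr = {
	.type           = PERF_TYPE_BREAKPOINT,
	.size           = sizeof(attr),
	.bp_type        = HW_BREAKPOINT_RW,
	.bp_addr        = (unsigned long)&watched,
	.bp_len         = HW_BREAKPOINT_LEN_4,
	.sample_period  = 1,		/* fire on every access */
	.remove_on_exec = 1,		/* the series requires this with sigtrap */
	.sigtrap        = 1,		/* deliver synchronous SIGTRAP */
	.sig_data       = 0x1234,	/* arrives as si_perf in the handler */
};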

[tip: perf/core] perf: Add support for event removal on exec

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     2e498d0a74e5b88a6689ae1b811f247f91ff188e
Gitweb:        https://git.kernel.org/tip/2e498d0a74e5b88a6689ae1b811f247f91ff188e
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:35:59 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:41 +02:00

perf: Add support for event removal on exec

Adds bit perf_event_attr::remove_on_exec, to support removing an event
from a task on exec.

This option supports the case where an event is supposed to be
process-wide only, and should not propagate beyond exec, to limit
monitoring to the original process image only.

Suggested-by: Peter Zijlstra 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20210408103605.1676875-5-el...@google.com
---
 include/uapi/linux/perf_event.h |  3 +-
 kernel/events/core.c| 70 
 2 files changed, 64 insertions(+), 9 deletions(-)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 813efb6..8c5b9f5 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -390,7 +390,8 @@ struct perf_event_attr {
text_poke  :  1, /* include text poke events */
build_id   :  1, /* use build id in mmap2 events */
inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
-   __reserved_1   : 28;
+   remove_on_exec :  1, /* event is removed from task on exec */
+   __reserved_1   : 27;
 
union {
__u32   wakeup_events;/* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3e3c00f..e4a584b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4248,6 +4248,57 @@ out:
put_ctx(clone_ctx);
 }
 
+static void perf_remove_from_owner(struct perf_event *event);
+static void perf_event_exit_event(struct perf_event *event,
+ struct perf_event_context *ctx);
+
+/*
+ * Removes all events from the current task that have been marked
+ * remove-on-exec, and feeds their values back to parent events.
+ */
+static void perf_event_remove_on_exec(int ctxn)
+{
+   struct perf_event_context *ctx, *clone_ctx = NULL;
+   struct perf_event *event, *next;
+   LIST_HEAD(free_list);
+   unsigned long flags;
+   bool modified = false;
+
+   ctx = perf_pin_task_context(current, ctxn);
+   if (!ctx)
+   return;
+
+   mutex_lock(&ctx->mutex);
+
+   if (WARN_ON_ONCE(ctx->task != current))
+   goto unlock;
+
+   list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
+   if (!event->attr.remove_on_exec)
+   continue;
+
+   if (!is_kernel_event(event))
+   perf_remove_from_owner(event);
+
+   modified = true;
+
+   perf_event_exit_event(event, ctx);
+   }
+
+   raw_spin_lock_irqsave(&ctx->lock, flags);
+   if (modified)
+   clone_ctx = unclone_ctx(ctx);
+   --ctx->pin_count;
+   raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+unlock:
+   mutex_unlock(&ctx->mutex);
+
+   put_ctx(ctx);
+   if (clone_ctx)
+   put_ctx(clone_ctx);
+}
+
 struct perf_read_data {
struct perf_event *event;
bool group;
@@ -7560,18 +7611,18 @@ void perf_event_exec(void)
struct perf_event_context *ctx;
int ctxn;
 
-   rcu_read_lock();
for_each_task_context_nr(ctxn) {
-   ctx = current->perf_event_ctxp[ctxn];
-   if (!ctx)
-   continue;
-
perf_event_enable_on_exec(ctxn);
+   perf_event_remove_on_exec(ctxn);
 
-   perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
-  true);
+   rcu_read_lock();
+   ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+   if (ctx) {
+   perf_iterate_ctx(ctx, perf_event_addr_filters_exec,
+NULL, true);
+   }
+   rcu_read_unlock();
}
-   rcu_read_unlock();
 }
 
 struct remote_output {
@@ -11656,6 +11707,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (!attr->inherit && attr->inherit_thread)
return -EINVAL;
 
+   if (attr->remove_on_exec && attr->enable_on_exec)
+   return -EINVAL;
+
 out:
return ret;
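
For illustration, a hedged sketch (not part of the patch) of an attribute set
that limits monitoring to the original process image:

/* A process-wide counter that is torn down on exec. */
struct perf_event_attr attr = {
	.type           = PERF_TYPE_HARDWARE,
	.size           = sizeof(attr),
	.config         = PERF_COUNT_HW_INSTRUCTIONS,
	.inherit        = 1,	/* propagate to children... */
	.remove_on_exec = 1,	/* ...but detach from any task that execs */
	/* enable_on_exec cannot be combined with remove_on_exec; the
	 * perf_copy_attr() check above returns EINVAL. */
};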
 


[tip: perf/core] selftests/perf_events: Add kselftest for process-wide sigtrap handling

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     f2c3c32f45002de19c6dec33f32fd259e82f2557
Gitweb:        https://git.kernel.org/tip/f2c3c32f45002de19c6dec33f32fd259e82f2557
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:36:02 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:42 +02:00

selftests/perf_events: Add kselftest for process-wide sigtrap handling

Add a kselftest for testing process-wide perf events with synchronous
SIGTRAP on events (using breakpoints). In particular, we want to test
that changes to the event propagate to all children, and the SIGTRAPs
are in fact synchronously sent to the thread where the event occurred.

Note: The "signal_stress" test case is also added later in the series to
perf tool's built-in tests. The test here is more elaborate in that
respect, which on one hand avoids bloating the perf tool unnecessarily,
but we also benefit from structured tests with TAP-compliant output that
the kselftest framework provides.

Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20210408103605.1676875-8-el...@google.com
---
 tools/testing/selftests/perf_events/.gitignore|   2 +-
 tools/testing/selftests/perf_events/Makefile  |   6 +-
 tools/testing/selftests/perf_events/config|   1 +-
 tools/testing/selftests/perf_events/settings  |   1 +-
 tools/testing/selftests/perf_events/sigtrap_threads.c | 210 +-
 5 files changed, 220 insertions(+)
 create mode 100644 tools/testing/selftests/perf_events/.gitignore
 create mode 100644 tools/testing/selftests/perf_events/Makefile
 create mode 100644 tools/testing/selftests/perf_events/config
 create mode 100644 tools/testing/selftests/perf_events/settings
 create mode 100644 tools/testing/selftests/perf_events/sigtrap_threads.c

diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
new file mode 100644
index 0000000..4dc43e1
--- /dev/null
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sigtrap_threads
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
new file mode 100644
index 0000000..973a2c3
--- /dev/null
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
+LDFLAGS += -lpthread
+
+TEST_GEN_PROGS := sigtrap_threads
+include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/config b/tools/testing/selftests/perf_events/config
new file mode 100644
index 0000000..ba58ff2
--- /dev/null
+++ b/tools/testing/selftests/perf_events/config
@@ -0,0 +1 @@
+CONFIG_PERF_EVENTS=y
diff --git a/tools/testing/selftests/perf_events/settings b/tools/testing/selftests/perf_events/settings
new file mode 100644
index 0000000..6091b45
--- /dev/null
+++ b/tools/testing/selftests/perf_events/settings
@@ -0,0 +1 @@
+timeout=120
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
new file mode 100644
index 0000000..9c0fd44
--- /dev/null
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for perf events with SIGTRAP across all threads.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define _GNU_SOURCE
+
+/* We need the latest siginfo from the kernel repo. */
+#include 
+#include 
+#define __have_siginfo_t 1
+#define __have_sigval_t 1
+#define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "../kselftest_harness.h"
+
+#define NUM_THREADS 5
+
+/* Data shared between test body, threads, and signal handler. */
+static struct {
+   int tids_want_signal;   /* Which threads still want a signal. */
+   int signal_count;   /* Sanity check number of signals received. */
+   volatile int iterate_on;/* Variable to set breakpoint on. */
+   siginfo_t first_siginfo;/* First observed siginfo_t. */
+} ctx;
+
+/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */
+#define TEST_SIG_DATA(addr) (~(uint64_t)(addr))
+
+static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
+{
+   struct perf_event_attr attr = {
+   .type   = PERF_TYPE_BREAKPOINT,
+   .size   = sizeof(attr),
+   .sample_period  = 1,
+   .disabled   = !enabled,
+   .bp_addr= (unsigned long)addr,
+   .bp_type= HW_BREAKPOINT_RW,
+   
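
The test builds and runs with the usual kselftest flow; assuming a kernel
tree with up-to-date uapi headers, something like:

	make headers_install
	make -C tools/testing/selftests TARGETS=perf_events run_tests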

[tip: perf/core] selftests/perf_events: Add kselftest for remove_on_exec

2021-04-16 Thread tip-bot2 for Marco Elver
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     6216798bf98e82c382922f1b71ecc4a13d6e65cb
Gitweb:        https://git.kernel.org/tip/6216798bf98e82c382922f1b71ecc4a13d6e65cb
Author:        Marco Elver
AuthorDate:    Thu, 08 Apr 2021 12:36:03 +02:00
Committer:     Peter Zijlstra
CommitterDate: Fri, 16 Apr 2021 16:32:42 +02:00

selftests/perf_events: Add kselftest for remove_on_exec

Add kselftest to test that remove_on_exec removes inherited events from
child tasks.

Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20210408103605.1676875-9-el...@google.com
---
 tools/testing/selftests/perf_events/.gitignore   |   1 +-
 tools/testing/selftests/perf_events/Makefile |   2 +-
 tools/testing/selftests/perf_events/remove_on_exec.c | 260 ++-
 3 files changed, 262 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/perf_events/remove_on_exec.c

diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
index 4dc43e1..790c470 100644
--- a/tools/testing/selftests/perf_events/.gitignore
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 sigtrap_threads
+remove_on_exec
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
index 973a2c3..fcafa5f 100644
--- a/tools/testing/selftests/perf_events/Makefile
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -2,5 +2,5 @@
 CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
 LDFLAGS += -lpthread
 
-TEST_GEN_PROGS := sigtrap_threads
+TEST_GEN_PROGS := sigtrap_threads remove_on_exec
 include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/remove_on_exec.c b/tools/testing/selftests/perf_events/remove_on_exec.c
new file mode 100644
index 0000000..5814611
--- /dev/null
+++ b/tools/testing/selftests/perf_events/remove_on_exec.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for remove_on_exec.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define _GNU_SOURCE
+
+/* We need the latest siginfo from the kernel repo. */
+#include 
+#include 
+#define __have_siginfo_t 1
+#define __have_sigval_t 1
+#define __have_sigevent_t 1
+#define __siginfo_t_defined
+#define __sigval_t_defined
+#define __sigevent_t_defined
+#define _BITS_SIGINFO_CONSTS_H 1
+#define _BITS_SIGEVENT_CONSTS_H 1
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "../kselftest_harness.h"
+
+static volatile int signal_count;
+
+static struct perf_event_attr make_event_attr(void)
+{
+   struct perf_event_attr attr = {
+   .type   = PERF_TYPE_HARDWARE,
+   .size   = sizeof(attr),
+   .config = PERF_COUNT_HW_INSTRUCTIONS,
+   .sample_period  = 1000,
+   .exclude_kernel = 1,
+   .exclude_hv = 1,
+   .disabled   = 1,
+   .inherit= 1,
+   /*
+* Children normally retain their inherited event on exec; with
+* remove_on_exec, we'll remove their event, but the parent and
+* any other non-exec'd children will keep their events.
+*/
+   .remove_on_exec = 1,
+   .sigtrap= 1,
+   };
+   return attr;
+}
+
+static void sigtrap_handler(int signum, siginfo_t *info, void *ucontext)
+{
+   if (info->si_code != TRAP_PERF) {
fprintf(stderr, "%s: unexpected si_code %d\n", __func__, info->si_code);
+   return;
+   }
+
+   signal_count++;
+}
+
+FIXTURE(remove_on_exec)
+{
+   struct sigaction oldact;
+   int fd;
+};
+
+FIXTURE_SETUP(remove_on_exec)
+{
+   struct perf_event_attr attr = make_event_attr();
+   struct sigaction action = {};
+
+   signal_count = 0;
+
+   /* Initialize sigtrap handler. */
+   action.sa_flags = SA_SIGINFO | SA_NODEFER;
+   action.sa_sigaction = sigtrap_handler;
+   sigemptyset(&action.sa_mask);
+   ASSERT_EQ(sigaction(SIGTRAP, &action, &self->oldact), 0);
+
+   /* Initialize perf event. */
+   self->fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+   ASSERT_NE(self->fd, -1);
+}
+
+FIXTURE_TEARDOWN(remove_on_exec)
+{
+   close(self->fd);
+   sigaction(SIGTRAP, &self->oldact, NULL);
+}
+
+/* Verify event propagates to fork'd child. */
+TEST_F(remove_on_exec, fork_only)
+{
+   int status;
+   pid_t pid = fork();
+
+   if (pid == 0) {
+   ASSERT_EQ(signal_count, 0);
+   ASSERT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+   while (!signal_count);
+   _exit(42);
+   }
+
+   while (!signal_count); /* Child enables event. */
+   EXPECT_EQ(waitpid(pid, &status, 0), pid);
+   EXPECT_EQ(WEXITSTATUS(status), 
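
For illustration, a hedged sketch (names arbitrary, not the test's actual
code) of the exec leg of such a test: the child re-executes the test binary,
and the exec'd image must then observe that the inherited event is gone.

#include <unistd.h>

static void exec_child(void)
{
	static char *const argv[] = { "remove_on_exec", NULL };
	static char *const envp[] = { "TEST_IS_EXEC_CHILD=1", NULL };

	/* remove_on_exec detaches the inherited event right here. */
	execve("/proc/self/exe", argv, envp);
	_exit(1);	/* only reached if execve() failed */
}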

[tip: locking/core] kcsan, debugfs: Move debugfs file creation out of early init

2021-04-11 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     e36299efe7d749976fbdaaf756dee6ef32543c2c
Gitweb:        https://git.kernel.org/tip/e36299efe7d749976fbdaaf756dee6ef32543c2c
Author:        Marco Elver
AuthorDate:    Wed, 03 Mar 2021 10:38:45 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 08 Mar 2021 14:27:43 -08:00

kcsan, debugfs: Move debugfs file creation out of early init

Commit 56348560d495 ("debugfs: do not attempt to create a new file
before the filesystem is initalized") forbids creating new debugfs files
until debugfs is fully initialized.  This means that KCSAN's debugfs
file creation, which happened at the end of __init(), no longer works,
and apparently was never supposed to work!

However, there is no reason to create KCSAN's debugfs file so early.
This commit therefore moves its creation to a late_initcall() callback.

Cc: "Rafael J. Wysocki" 
Cc: stable 
Fixes: 56348560d495 ("debugfs: do not attempt to create a new file before the 
filesystem is initalized")
Reviewed-by: Greg Kroah-Hartman 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c| 2 --
 kernel/kcsan/debugfs.c | 4 +++-
 kernel/kcsan/kcsan.h   | 5 -
 3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3bf98db..23e7acb 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -639,8 +639,6 @@ void __init kcsan_init(void)
 
BUG_ON(!in_task());
 
-   kcsan_debugfs_init();
-
for_each_possible_cpu(cpu)
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
 
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 3c8093a..209ad8d 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
.release = single_release
 };
 
-void __init kcsan_debugfs_init(void)
+static void __init kcsan_debugfs_init(void)
 {
debugfs_create_file("kcsan", 0644, NULL, NULL, _ops);
 }
+
+late_initcall(kcsan_debugfs_init);
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 8d4bf34..87ccdb3 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -31,11 +31,6 @@ void kcsan_save_irqtrace(struct task_struct *task);
 void kcsan_restore_irqtrace(struct task_struct *task);
 
 /*
- * Initialize debugfs file.
- */
-void kcsan_debugfs_init(void);
-
-/*
  * Statistics counters displayed via debugfs; should only be modified in
  * slow-paths.
  */


[tip: locking/core] kcsan: Make test follow KUnit style recommendations

2021-04-11 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     a146fed56f8a06a6f17ac11ebdc7ca3f396bcb55
Gitweb:        https://git.kernel.org/tip/a146fed56f8a06a6f17ac11ebdc7ca3f396bcb55
Author:        Marco Elver
AuthorDate:    Wed, 13 Jan 2021 17:05:56 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 08 Mar 2021 14:27:43 -08:00

kcsan: Make test follow KUnit style recommendations

Per recently added KUnit style recommendations at
Documentation/dev-tools/kunit/style.rst, make the following changes to
the KCSAN test:

1. Rename 'kcsan-test.c' to 'kcsan_test.c'.

2. Rename suite name 'kcsan-test' to 'kcsan'.

3. Rename CONFIG_KCSAN_TEST to CONFIG_KCSAN_KUNIT_TEST and
   default to KUNIT_ALL_TESTS.

Reviewed-by: David Gow 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/Makefile |4 +-
 kernel/kcsan/kcsan-test.c | 1207 -
 kernel/kcsan/kcsan_test.c | 1207 +
 lib/Kconfig.kcsan |5 +-
 4 files changed, 1212 insertions(+), 1211 deletions(-)
 delete mode 100644 kernel/kcsan/kcsan-test.c
 create mode 100644 kernel/kcsan/kcsan_test.c

diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
index 65ca553..c2bb07f 100644
--- a/kernel/kcsan/Makefile
+++ b/kernel/kcsan/Makefile
@@ -13,5 +13,5 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
 obj-y := core.o debugfs.o report.o
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
 
-CFLAGS_kcsan-test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
-obj-$(CONFIG_KCSAN_TEST) += kcsan-test.o
+CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
deleted file mode 100644
index ebe7fd2..0000000
--- a/kernel/kcsan/kcsan-test.c
+++ /dev/null
@@ -1,1207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KCSAN test with various race scenarious to test runtime behaviour. Since the
- * interface with which KCSAN's reports are obtained is via the console, this is
- * the output we should verify. For each test case checks the presence (or
- * absence) of generated reports. Relies on 'console' tracepoint to capture
- * reports as they appear in the kernel log.
- *
- * Makes use of KUnit for test organization, and the Torture framework for test
- * thread control.
- *
- * Copyright (C) 2020, Google LLC.
- * Author: Marco Elver 
- */
-
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
-#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
-#else
-#define __KCSAN_ACCESS_RW(alt) (alt)
-#endif
-
-/* Points to current test-case memory access "kernels". */
-static void (*access_kernels[2])(void);
-
-static struct task_struct **threads; /* Lists of threads. */
-static unsigned long end_time;   /* End time of test. */
-
-/* Report as observed from console. */
-static struct {
-   spinlock_t lock;
-   int nlines;
-   char lines[3][512];
-} observed = {
-   .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
-};
-
-/* Setup test checking loop. */
-static __no_kcsan inline void
-begin_test_checks(void (*func1)(void), void (*func2)(void))
-{
-   kcsan_disable_current();
-
-   /*
-* Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
-* least one race is reported.
-*/
-   end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);
-
-   /* Signal start; release potential initialization of shared data. */
-   smp_store_release(&access_kernels[0], func1);
-   smp_store_release(&access_kernels[1], func2);
-}
-
-/* End test checking loop. */
-static __no_kcsan inline bool
-end_test_checks(bool stop)
-{
-   if (!stop && time_before(jiffies, end_time)) {
-   /* Continue checking */
-   might_sleep();
-   return false;
-   }
-
-   kcsan_enable_current();
-   return true;
-}
-
-/*
- * Probe for console output: checks if a race was reported, and obtains observed
- * lines of interest.
- */
-__no_kcsan
-static void probe_console(void *ignore, const char *buf, size_t len)
-{
-   unsigned long flags;
-   int nlines;
-
-   /*
-* Note that KCSAN reports under a global lock, so we do not risk the
-* possibility of having multiple reports interleaved. If that were the
-* case, we'd expect tests to fail.
-*/
-
-   spin_lock_irqsave(&observed.lock, flags);
-   nlines = observed.nlines;
-
-   if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
-   /*
-* KCSAN report and related to the test.
-*
-* The provided @buf is not NUL-terminated; copy no more than
-* @len 

[tip: locking/core] kcsan: Switch to KUNIT_CASE_PARAM for parameterized tests

2021-04-11 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     f6a149140321274cbd955dee50798fe191841f94
Gitweb:        https://git.kernel.org/tip/f6a149140321274cbd955dee50798fe191841f94
Author:        Marco Elver
AuthorDate:    Wed, 13 Jan 2021 17:05:57 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 08 Mar 2021 14:27:43 -08:00

kcsan: Switch to KUNIT_CASE_PARAM for parameterized tests

Since KUnit now supports parameterized tests via KUNIT_CASE_PARAM, update
KCSAN's test to switch to it for parameterized tests. This simplifies
parameterized tests and gets rid of the "parameters in case name"
workaround (hack).

At the same time, we can increase the maximum number of threads used,
because on systems with too few CPUs, KUnit allows us to now stop at the
maximum useful threads and not unnecessarily execute redundant test
cases with (the same) limited threads as had been the case before.

Reviewed-by: David Gow 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/kcsan_test.c | 116 +
 1 file changed, 54 insertions(+), 62 deletions(-)

diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index f16f632..b71751f 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -13,6 +13,8 @@
  * Author: Marco Elver 
  */
 
+#define pr_fmt(fmt) "kcsan_test: " fmt
+
 #include 
 #include 
 #include 
@@ -951,22 +953,53 @@ static void test_atomic_builtins(struct kunit *test)
 }
 
 /*
- * Each test case is run with different numbers of threads. Until KUnit supports
- * passing arguments for each test case, we encode #threads in the test case
- * name (read by get_num_threads()). [The '-' was chosen as a stylistic
- * preference to separate test name and #threads.]
+ * Generate thread counts for all test cases. Values generated are in interval
+ * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
  *
  * The thread counts are chosen to cover potentially interesting boundaries and
- * corner cases (range 2-5), and then stress the system with larger counts.
+ * corner cases (2 to 5), and then stress the system with larger counts.
  */
-#define KCSAN_KUNIT_CASE(test_name)   \
-   { .run_case = test_name, .name = #test_name "-02" },   \
-   { .run_case = test_name, .name = #test_name "-03" },   \
-   { .run_case = test_name, .name = #test_name "-04" },   \
-   { .run_case = test_name, .name = #test_name "-05" },   \
-   { .run_case = test_name, .name = #test_name "-08" },   \
-   { .run_case = test_name, .name = #test_name "-16" }
+static const void *nthreads_gen_params(const void *prev, char *desc)
+{
+   long nthreads = (long)prev;
+
+   if (nthreads < 0 || nthreads >= 32)
+   nthreads = 0; /* stop */
+   else if (!nthreads)
+   nthreads = 2; /* initial value */
+   else if (nthreads < 5)
+   nthreads++;
+   else if (nthreads == 5)
+   nthreads = 8;
+   else
+   nthreads *= 2;
 
+   if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
+   /*
+* Without any preemption, keep 2 CPUs free for other tasks, one
+* of which is the main test case function checking for
+* completion or failure.
+*/
+   const long min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0;
+   const long min_required_cpus = 2 + min_unused_cpus;
+
+   if (num_online_cpus() < min_required_cpus) {
+   pr_err_once("Too few online CPUs (%u < %d) for test\n",
+   num_online_cpus(), min_required_cpus);
+   nthreads = 0;
+   } else if (nthreads >= num_online_cpus() - min_unused_cpus) {
+   /* Use negative value to indicate last param. */
+   nthreads = -(num_online_cpus() - min_unused_cpus);
+   pr_warn_once("Limiting number of threads to %ld (only 
%d online CPUs)\n",
+-nthreads, num_online_cpus());
+   }
+   }
+
+   snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
+   return (void *)nthreads;
+}
+
+#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
 static struct kunit_case kcsan_test_cases[] = {
KCSAN_KUNIT_CASE(test_basic),
KCSAN_KUNIT_CASE(test_concurrent_races),
@@ -996,24 +1029,6 @@ static struct kunit_case kcsan_test_cases[] = {
 
 /* = End test cases = */
 
-/* Get number of threads encoded in test name. */
-static bool __no_kcsan
-get_num_threads(const char *test, int *nthreads)
-{
-   int len = strlen(test);
-
-   if (WARN_ON(len < 3))
-   
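
The generator protocol used above is generic KUnit: the generator receives the
previous parameter (NULL on the first call), returns the next one (writing a
human-readable description into desc), and returns NULL to terminate. A
minimal, hypothetical generator for some other parameter:

#include <kunit/test.h>

/* Run a test case with buffer sizes 16, 32, ..., 256. */
static const void *buf_size_gen_params(const void *prev, char *desc)
{
	long size = prev ? (long)prev * 2 : 16;

	if (size > 256)
		return NULL;	/* no more parameters */
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "size=%ld", size);
	return (void *)size;
}

/* In the suite's case list:
 *	KUNIT_CASE_PARAM(test_buffers, buf_size_gen_params),
 */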

[tip: locking/core] kcsan: Add missing license and copyright headers

2021-04-11 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     bd0ccc4afca2d6ae0029cae35c4f1d2e2ade7579
Gitweb:        https://git.kernel.org/tip/bd0ccc4afca2d6ae0029cae35c4f1d2e2ade7579
Author:        Marco Elver
AuthorDate:    Fri, 15 Jan 2021 18:09:53 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 08 Mar 2021 14:27:43 -08:00

kcsan: Add missing license and copyright headers

Adds missing license and/or copyright headers for KCSAN source files.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 Documentation/dev-tools/kcsan.rst | 3 +++
 include/linux/kcsan-checks.h  | 6 ++
 include/linux/kcsan.h | 7 +++
 kernel/kcsan/atomic.h | 5 +
 kernel/kcsan/core.c   | 5 +
 kernel/kcsan/debugfs.c| 5 +
 kernel/kcsan/encoding.h   | 5 +
 kernel/kcsan/kcsan.h  | 3 ++-
 kernel/kcsan/report.c | 5 +
 kernel/kcsan/selftest.c   | 5 +
 10 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index be7a0b0..d85ce23 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright (C) 2019, Google LLC.
+
 The Kernel Concurrency Sanitizer (KCSAN)
 =========================================
 
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index cf14840..9fd0ad8 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -1,4 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _LINUX_KCSAN_CHECKS_H
 #define _LINUX_KCSAN_CHECKS_H
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 53340d8..fc266ec 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -1,4 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up runtime. See kcsan-checks.h for explicit checks and
+ * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _LINUX_KCSAN_H
 #define _LINUX_KCSAN_H
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index 75fe701..530ae1b 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rules for implicitly atomic memory accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _KERNEL_KCSAN_ATOMIC_H
 #define _KERNEL_KCSAN_ATOMIC_H
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 23e7acb..45c821d 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN core runtime.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #define pr_fmt(fmt) "kcsan: " fmt
 
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 209ad8d..c1dd02f 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN debugfs interface.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #define pr_fmt(fmt) "kcsan: " fmt
 
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index 7ee4055..170a2bb 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN watchpoint encoding.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #ifndef _KERNEL_KCSAN_ENCODING_H
 #define _KERNEL_KCSAN_ENCODING_H
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 87ccdb3..9881099 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-
 /*
 * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please
  * see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
  */
 
 #ifndef _KERNEL_KCSAN_KCSAN_H
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index d3bf87e..13dce3c 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN reporting.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #include 
 #include 
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 9014a3a..7f29cb0 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -1,4 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+/*
+ * KCSAN short boot-time selftests.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
 
 #define pr_fmt(fmt) "kcsan: " fmt
 


[tip: locking/core] random32: Re-enable KCSAN instrumentation

2021-02-15 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     567a83e6872c15b2080d1d03de71868cd0ae7cea
Gitweb:        https://git.kernel.org/tip/567a83e6872c15b2080d1d03de71868cd0ae7cea
Author:        Marco Elver
AuthorDate:    Tue, 24 Nov 2020 12:02:10 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 04 Jan 2021 14:39:07 -08:00

random32: Re-enable KCSAN instrumentation

Re-enable KCSAN instrumentation, now that KCSAN no longer relies on code
in lib/random32.c.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 lib/Makefile | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/lib/Makefile b/lib/Makefile
index afeff05..dc09208 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,9 +27,6 @@ KASAN_SANITIZE_string.o := n
 CFLAGS_string.o += -fno-stack-protector
 endif
 
-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 rbtree.o radix-tree.o timerqueue.o xarray.o \
 idr.o extable.o sha1.o irq_regs.o argv_split.o \


[tip: locking/core] kcsan: Rewrite kcsan_prandom_u32_max() without prandom_u32_state()

2021-02-15 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     71a076f4a61a6c779794ad286f356b39725edc3b
Gitweb:        https://git.kernel.org/tip/71a076f4a61a6c779794ad286f356b39725edc3b
Author:        Marco Elver
AuthorDate:    Tue, 24 Nov 2020 12:02:09 +01:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 04 Jan 2021 14:39:07 -08:00

kcsan: Rewrite kcsan_prandom_u32_max() without prandom_u32_state()

Rewrite kcsan_prandom_u32_max() to not depend on code that might be
instrumented, removing any dependency on lib/random32.c. The rewrite
implements a simple linear congruential generator, that is sufficient
for our purposes (for udelay() and skip_watch counter randomness).

The initial motivation for this was to allow enabling KCSAN for
kernel/sched (remove KCSAN_SANITIZE := n from kernel/sched/Makefile),
with CONFIG_DEBUG_PREEMPT=y. Without this change, we could observe
recursion:

check_access() [via instrumentation]
  kcsan_setup_watchpoint()
reset_kcsan_skip()
  kcsan_prandom_u32_max()
get_cpu_var()
  preempt_disable()
preempt_count_add() [in kernel/sched/core.c]
  check_access() [via instrumentation]

Note, while this currently does not affect an unmodified kernel, it'd be
good to keep a KCSAN kernel working when KCSAN_SANITIZE := n is removed
from kernel/sched/Makefile to permit testing scheduler code with KCSAN
if desired.

Fixes: cd290ec24633 ("kcsan: Use tracing-safe version of prandom")
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 26 +-
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3994a21..3bf98db 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -12,7 +12,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
 static DEFINE_PER_CPU(long, kcsan_skip);
 
 /* For kcsan_prandom_u32_max(). */
-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+static DEFINE_PER_CPU(u32, kcsan_rand_state);
 
 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
  size_t size,
@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
 }
 
 /*
- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
- * for more details.
- *
- * The open-coded version here is using only safe primitives for all contexts
- * where we can have KCSAN instrumentation. In particular, we cannot use
- * prandom_u32() directly, as its tracepoint could cause recursion.
+ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
+ * congruential generator, using constants from "Numerical Recipes".
  */
 static u32 kcsan_prandom_u32_max(u32 ep_ro)
 {
-   struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
-   const u32 res = prandom_u32_state(state);
+   u32 state = this_cpu_read(kcsan_rand_state);
+
+   state = 1664525 * state + 1013904223;
+   this_cpu_write(kcsan_rand_state, state);
 
-   put_cpu_var(kcsan_rand_state);
-   return (u32)(((u64) res * ep_ro) >> 32);
+   return state % ep_ro;
 }
 
 static inline void reset_kcsan_skip(void)
@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 
 void __init kcsan_init(void)
 {
+   int cpu;
+
BUG_ON(!in_task());
 
kcsan_debugfs_init();
-   prandom_seed_full_state(&kcsan_rand_state);
+
+   for_each_possible_cpu(cpu)
+   per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
 
/*
 * We are in the init task, and no other tasks should be running;
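
For illustration, the same LCG step lifted into a stand-alone user-space
harness (seed arbitrary): x' = 1664525*x + 1013904223 (mod 2^32). The slight
modulo bias of `state % ep_ro` is acceptable for randomizing delays and skip
counts.

#include <stdint.h>
#include <stdio.h>

static uint32_t state = 12345;	/* arbitrary seed; the kernel uses get_cycles() */

static uint32_t lcg_u32_max(uint32_t ep_ro)
{
	state = 1664525u * state + 1013904223u;	/* "Numerical Recipes" constants */
	return state % ep_ro;	/* pseudo-random in [0, ep_ro) */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", lcg_u32_max(100));
	return 0;
}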


[tip: core/rcu] kcsan: Never set up watchpoints on NULL pointers

2020-12-13 Thread tip-bot2 for Marco Elver
The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     55a2346c7ac4bbf6ee6972394237bf31e29a1c05
Gitweb:        https://git.kernel.org/tip/55a2346c7ac4bbf6ee6972394237bf31e29a1c05
Author:        Marco Elver
AuthorDate:    Thu, 22 Oct 2020 13:45:53 +02:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 02 Nov 2020 17:08:51 -08:00

kcsan: Never set up watchpoints on NULL pointers

Avoid setting up watchpoints on NULL pointers, as otherwise we would
crash inside the KCSAN runtime (when checking for value changes) instead
of the instrumented code.

Because that may be confusing, skip any address less than PAGE_SIZE.

Reviewed-by: Dmitry Vyukov 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/encoding.h | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index 1a6db2f..4f73db6 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -48,7 +48,11 @@
 
 static inline bool check_encodable(unsigned long addr, size_t size)
 {
-   return size <= MAX_ENCODABLE_SIZE;
+   /*
+* While we can encode addrs<PAGE_SIZE, avoid crashing with a NULL
+* pointer deref inside KCSAN.
+*/
+   return addr >= PAGE_SIZE && size <= MAX_ENCODABLE_SIZE;
 }
 
 static inline long


[tip: core/rcu] kcsan: Fix encoding masks and regain address bit

2020-12-13 Thread tip-bot2 for Marco Elver
The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     1d094cefc37e5ed4dec44a41841c8628f6b548a2
Gitweb:        https://git.kernel.org/tip/1d094cefc37e5ed4dec44a41841c8628f6b548a2
Author:        Marco Elver
AuthorDate:    Fri, 06 Nov 2020 10:34:56 +01:00
Committer:     Paul E. McKenney
CommitterDate: Fri, 06 Nov 2020 17:19:26 -08:00

kcsan: Fix encoding masks and regain address bit

The watchpoint encoding masks for size and address were off-by-one bit
each, with the size mask using 1 unnecessary bit and the address mask
missing 1 bit. However, due to the way the size is shifted into the
encoded watchpoint, we were effectively wasting and never using the
extra bit.

For example, on x86 with PAGE_SIZE==4K, we have 1 bit for the is-write
bit, 14 bits for the size bits, and then 49 bits left for the address.
Prior to this fix we would end up with this usage:

[ write<1> | size<14> | wasted<1> | address<48> ]

Fix it by subtracting 1 bit from the GENMASK() end and start ranges of
size and address respectively. The added static_assert()s verify that
the masks are as expected. With the fixed version, we get the expected
usage:

[ write<1> | size<14> | address<49> ]

Functionally no change is expected, since that extra address bit is
insignificant for enabled architectures.

Acked-by: Boqun Feng 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/encoding.h | 14 ++
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index 4f73db6..7ee4055 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -37,14 +37,12 @@
  */
 #define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
 
-/*
- * Masks to set/retrieve the encoded data.
- */
-#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
-#define WATCHPOINT_SIZE_MASK   \
-   GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
-#define WATCHPOINT_ADDR_MASK   \
-   GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
+/* Bitmasks for the encoded watchpoint access information. */
+#define WATCHPOINT_WRITE_MASK  BIT(BITS_PER_LONG-1)
+#define WATCHPOINT_SIZE_MASK   GENMASK(BITS_PER_LONG-2, WATCHPOINT_ADDR_BITS)
+#define WATCHPOINT_ADDR_MASK   GENMASK(WATCHPOINT_ADDR_BITS-1, 0)
+static_assert(WATCHPOINT_ADDR_MASK == (1UL << WATCHPOINT_ADDR_BITS) - 1);
+static_assert((WATCHPOINT_WRITE_MASK ^ WATCHPOINT_SIZE_MASK ^ WATCHPOINT_ADDR_MASK) == ~0UL);
 
 static inline bool check_encodable(unsigned long addr, size_t size)
 {


[tip: core/rcu] kcsan: selftest: Ensure that address is at least PAGE_SIZE

2020-12-13 Thread tip-bot2 for Marco Elver
The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     4761612ffe3c1655e58f1ef9cf867c6f67d46fe2
Gitweb:        https://git.kernel.org/tip/4761612ffe3c1655e58f1ef9cf867c6f67d46fe2
Author:        Marco Elver
AuthorDate:    Thu, 22 Oct 2020 13:45:52 +02:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 02 Nov 2020 17:08:50 -08:00

kcsan: selftest: Ensure that address is at least PAGE_SIZE

In preparation of supporting only addresses not within the NULL page,
change the selftest to never use addresses that are less than PAGE_SIZE.

Reviewed-by: Dmitry Vyukov 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/selftest.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index d98bc20..9014a3a 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -33,6 +33,9 @@ static bool test_encode_decode(void)
unsigned long addr;
 
prandom_bytes(&addr, sizeof(addr));
+   if (addr < PAGE_SIZE)
+   addr = PAGE_SIZE;
+
if (WARN_ON(!check_encodable(addr, size)))
return false;
 


[tip: locking/core] bitops, kcsan: Partially revert instrumentation for non-atomic bitops

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     068df05363b79f54241bd6bd612055b8c16c5964
Gitweb:        https://git.kernel.org/tip/068df05363b79f54241bd6bd612055b8c16c5964
Author:        Marco Elver
AuthorDate:    Thu, 13 Aug 2020 18:38:59 +02:00
Committer:     Paul E. McKenney
CommitterDate: Mon, 24 Aug 2020 15:10:24 -07:00

bitops, kcsan: Partially revert instrumentation for non-atomic bitops

Previous to the change to distinguish read-write accesses, when
CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y is set, KCSAN would consider
the non-atomic bitops as atomic. We want to partially revert to this
behaviour, but with one important distinction: report racing
modifications, since lost bits due to non-atomicity are certainly
possible.

Given the operations here only modify a single bit, assuming
non-atomicity of the writer is sufficient may be reasonable for certain
usage (and follows the permissible nature of the "assume plain writes
atomic" rule). In other words:

1. We want non-atomic read-modify-write races to be reported;
   this is accomplished by kcsan_check_read(), where any
   concurrent write (atomic or not) will generate a report.

2. We do not want to report races with marked readers, but -do-
   want to report races with unmarked readers; this is
   accomplished by the instrument_write() ("assume atomic
   write" with Kconfig option set).

With the above rules, when KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected,
it is hoped that KCSAN's reporting behaviour is better aligned with
current expected permissible usage for non-atomic bitops.

Note that a side-effect of not telling KCSAN that the accesses are
read-writes is that this information is not displayed in the access
summary in the report. It is, however, visible in inline-expanded stack
traces. For now, it does not make sense to introduce yet another special
case to KCSAN's runtime, only to cater to the case here.
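
As an illustration of the two rules (a sketch, assuming
CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y; flags_demo is a hypothetical
variable, and test_bit() counts as a marked reader):

	unsigned long flags_demo;

	/* Writer: */
	__test_and_set_bit(0, &flags_demo);	/* rule 1: the read check reports
						   races with any concurrent write */
	/* Marked reader: */
	if (test_bit(0, &flags_demo))		/* rule 2: not reported */
		;
	/* Unmarked reader: */
	if (flags_demo & 1UL)			/* rule 2: reported */
		;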

Cc: Dmitry Vyukov 
Cc: Paul E. McKenney 
Cc: Will Deacon 
Cc: Arnd Bergmann 
Cc: Daniel Axtens 
Cc: Michael Ellerman 
Cc: 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/asm-generic/bitops/instrumented-non-atomic.h | 30 +--
 1 file changed, 27 insertions(+), 3 deletions(-)

diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h 
b/include/asm-generic/bitops/instrumented-non-atomic.h
index f86234c..37363d5 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -58,6 +58,30 @@ static inline void __change_bit(long nr, volatile unsigned 
long *addr)
arch___change_bit(nr, addr);
 }
 
+static inline void __instrument_read_write_bitop(long nr, volatile unsigned 
long *addr)
+{
+   if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+   /*
+* We treat non-atomic read-write bitops a little more special.
+* Given the operations here only modify a single bit, assuming
+* non-atomicity of the writer is sufficient may be reasonable
+* for certain usage (and follows the permissible nature of the
+* assume-plain-writes-atomic rule):
+* 1. report read-modify-write races -> check read;
+* 2. do not report races with marked readers, but do report
+*races with unmarked readers -> check "atomic" write.
+*/
+   kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+   /*
+* Use generic write instrumentation, in case other sanitizers
+* or tools are enabled alongside KCSAN.
+*/
+   instrument_write(addr + BIT_WORD(nr), sizeof(long));
+   } else {
+   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+   }
+}
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -68,7 +92,7 @@ static inline void __change_bit(long nr, volatile unsigned 
long *addr)
  */
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+   __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
 }
 
@@ -82,7 +106,7 @@ static inline bool __test_and_set_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+   __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
 }
 
@@ -96,7 +120,7 @@ static inline bool __test_and_clear_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+   

[tip: locking/core] kcsan: Use tracing-safe version of prandom

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: cd290ec24633f51029dab0d25505fae7da0e1eda
Gitweb:
https://git.kernel.org/tip/cd290ec24633f51029dab0d25505fae7da0e1eda
Author:Marco Elver 
AuthorDate:Fri, 21 Aug 2020 14:31:26 +02:00
Committer: Paul E. McKenney 
CommitterDate: Sun, 30 Aug 2020 21:50:13 -07:00

kcsan: Use tracing-safe version of prandom

In the core runtime, we must minimize any calls to external library
functions to avoid any kind of recursion. This can happen even though
instrumentation is disabled for called functions, but tracing is
enabled.

Most recently, prandom_u32() added a tracepoint, which can cause
problems for KCSAN even if the rcuidle variant is used. For example:
kcsan -> prandom_u32() -> trace_prandom_u32_rcuidle ->
srcu_read_lock_notrace -> __srcu_read_lock -> kcsan ...

While we could disable KCSAN in kcsan_setup_watchpoint(), this does not
solve other unexpected behaviour we may get due to recursing into functions
that may not be tolerant to such recursion:
__srcu_read_lock -> kcsan -> ... -> __srcu_read_lock

Therefore, switch to using prandom_u32_state(), which is uninstrumented,
and does not have a tracepoint.

Link: https://lkml.kernel.org/r/20200821063043.1949509-1-el...@google.com
Link: https://lkml.kernel.org/r/20200820172046.ga177...@elver.google.com
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 35 +--
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 8a1ff60..3994a21 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -100,6 +100,9 @@ static atomic_long_t 
watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
  */
 static DEFINE_PER_CPU(long, kcsan_skip);
 
+/* For kcsan_prandom_u32_max(). */
+static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+
 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
  size_t size,
  bool expect_write,
@@ -271,11 +274,28 @@ should_watch(const volatile void *ptr, size_t size, int 
type, struct kcsan_ctx *
return true;
 }
 
+/*
+ * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
+ * for more details.
+ *
+ * The open-coded version here is using only safe primitives for all contexts
+ * where we can have KCSAN instrumentation. In particular, we cannot use
+ * prandom_u32() directly, as its tracepoint could cause recursion.
+ */
+static u32 kcsan_prandom_u32_max(u32 ep_ro)
+{
+   struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
+   const u32 res = prandom_u32_state(state);
+
+   put_cpu_var(kcsan_rand_state);
+   return (u32)(((u64) res * ep_ro) >> 32);
+}
+
 static inline void reset_kcsan_skip(void)
 {
long skip_count = kcsan_skip_watch -
  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
-  prandom_u32_max(kcsan_skip_watch) :
+  kcsan_prandom_u32_max(kcsan_skip_watch) :
   0);
this_cpu_write(kcsan_skip, skip_count);
 }
@@ -285,16 +305,18 @@ static __always_inline bool kcsan_is_enabled(void)
return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
 }
 
-static inline unsigned int get_delay(int type)
+/* Introduce delay depending on context and configuration. */
+static void delay_access(int type)
 {
unsigned int delay = in_task() ? kcsan_udelay_task : 
kcsan_udelay_interrupt;
/* For certain access types, skew the random delay to be longer. */
unsigned int skew_delay_order =
(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
 
-   return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
-   prandom_u32_max(delay >> skew_delay_order) :
-   0);
+   delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
+  kcsan_prandom_u32_max(delay >> skew_delay_order) 
:
+  0;
+   udelay(delay);
 }
 
 void kcsan_save_irqtrace(struct task_struct *task)
@@ -476,7 +498,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 * Delay this thread, to increase probability of observing a racy
 * conflicting access.
 */
-   udelay(get_delay(type));
+   delay_access(type);
 
/*
 * Re-read value, and check if it is as expected; if not, we infer a
@@ -620,6 +642,7 @@ void __init kcsan_init(void)
BUG_ON(!in_task());
 
kcsan_debugfs_init();
+   prandom_seed_full_state(&kcsan_rand_state);
 
/*
 * We are in the init task, and no other tasks should be running;
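
The multiply-shift step in kcsan_prandom_u32_max() above is the standard
trick for reducing a uniform 32-bit value into [0, ep_ro) without a modulo;
as a standalone sketch (bounded_rand_demo is a hypothetical name):

	static u32 bounded_rand_demo(u32 rnd, u32 ep_ro)
	{
		/* rnd / 2^32 is uniform in [0, 1); scaling by ep_ro and
		 * truncating yields a uniform value in [0, ep_ro). */
		return (u32)(((u64)rnd * ep_ro) >> 32);
	}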


[tip: locking/core] instrumented.h: Introduce read-write instrumentation hooks

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 00047c2e6d7c576c1a847f7db07ef0fc58085f22
Gitweb:
https://git.kernel.org/tip/00047c2e6d7c576c1a847f7db07ef0fc58085f22
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:06 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:58 -07:00

instrumented.h: Introduce read-write instrumentation hooks

Introduce read-write instrumentation hooks, to more precisely denote an
operation's behaviour.

KCSAN is able to distinguish compound instrumentation, and with the new
instrumentation we then benefit from improved reporting. More
importantly, read-write compound operations should not implicitly be
treated as atomic, if they aren't actually atomic.

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/instrumented.h | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 43e6ea5..42faebb 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -43,6 +43,21 @@ static __always_inline void instrument_write(const volatile 
void *v, size_t size
 }
 
 /**
+ * instrument_read_write - instrument regular read-write access
+ *
+ * Instrument a regular read-write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read_write(const volatile void *v, 
size_t size)
+{
+   kasan_check_write(v, size);
+   kcsan_check_read_write(v, size);
+}
+
+/**
  * instrument_atomic_read - instrument atomic read access
  *
  * Instrument an atomic read access. The instrumentation should be inserted
@@ -73,6 +88,21 @@ static __always_inline void instrument_atomic_write(const 
volatile void *v, size
 }
 
 /**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void 
*v, size_t size)
+{
+   kasan_check_write(v, size);
+   kcsan_check_atomic_read_write(v, size);
+}
+
+/**
  * instrument_copy_to_user - instrument reads of copy_to_user
  *
  * Instrument reads from kernel memory, that are due to copy_to_user (and


[tip: locking/core] asm-generic/bitops: Use instrument_read_write() where appropriate

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: b159eeccb75a7916278d95e2ff5540e670682748
Gitweb:
https://git.kernel.org/tip/b159eeccb75a7916278d95e2ff5540e670682748
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:07 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:59 -07:00

asm-generic/bitops: Use instrument_read_write() where appropriate

Use the new instrument_read_write() where appropriate.

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/asm-generic/bitops/instrumented-atomic.h | 6 +++---
 include/asm-generic/bitops/instrumented-lock.h   | 2 +-
 include/asm-generic/bitops/instrumented-non-atomic.h | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/asm-generic/bitops/instrumented-atomic.h 
b/include/asm-generic/bitops/instrumented-atomic.h
index fb2cb33..81915dc 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long 
*addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
 }
 
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
 }
 
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
 }
 
diff --git a/include/asm-generic/bitops/instrumented-lock.h 
b/include/asm-generic/bitops/instrumented-lock.h
index b9bec46..75ef606 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile 
unsigned long *addr)
  */
 static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
-   instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
 }
 
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h 
b/include/asm-generic/bitops/instrumented-non-atomic.h
index 20f788a..f86234c 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned 
long *addr)
  */
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
 }
 
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
 }
 
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile 
unsigned long *addr)
  */
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-   instrument_write(addr + BIT_WORD(nr), sizeof(long));
+   instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
 }
 


[tip: locking/core] kcsan: Simplify debugfs counter to name mapping

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 69b2c81bc894606670204f0ae08f406dbcce836d
Gitweb:
https://git.kernel.org/tip/69b2c81bc894606670204f0ae08f406dbcce836d
Author:Marco Elver 
AuthorDate:Fri, 31 Jul 2020 10:17:19 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:21 -07:00

kcsan: Simplify debugfs counter to name mapping

Simplify counter ID to name mapping by using an array with designated
inits. This way, we can turn a run-time BUG() into a compile-time static
assertion failure if a counter name is missing.

No functional change intended.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/debugfs.c | 33 +
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 023e49c..3a9566a 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -19,6 +19,18 @@
  * Statistics counters.
  */
 static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+static const char *const counter_names[] = {
+   [KCSAN_COUNTER_USED_WATCHPOINTS]= "used_watchpoints",
+   [KCSAN_COUNTER_SETUP_WATCHPOINTS]   = "setup_watchpoints",
+   [KCSAN_COUNTER_DATA_RACES]  = "data_races",
+   [KCSAN_COUNTER_ASSERT_FAILURES] = "assert_failures",
+   [KCSAN_COUNTER_NO_CAPACITY] = "no_capacity",
+   [KCSAN_COUNTER_REPORT_RACES]= "report_races",
+   [KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]    = "races_unknown_origin",
+   [KCSAN_COUNTER_UNENCODABLE_ACCESSES]    = "unencodable_accesses",
+   [KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]= "encoding_false_positives",
+};
+static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
 
 /*
  * Addresses for filtering functions from reporting. This list can be used as a
@@ -39,24 +51,6 @@ static struct {
 };
 static DEFINE_SPINLOCK(report_filterlist_lock);
 
-static const char *counter_to_name(enum kcsan_counter_id id)
-{
-   switch (id) {
-   case KCSAN_COUNTER_USED_WATCHPOINTS:return 
"used_watchpoints";
-   case KCSAN_COUNTER_SETUP_WATCHPOINTS:   return 
"setup_watchpoints";
-   case KCSAN_COUNTER_DATA_RACES:  return "data_races";
-   case KCSAN_COUNTER_ASSERT_FAILURES: return 
"assert_failures";
-   case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
-   case KCSAN_COUNTER_REPORT_RACES:return "report_races";
-   case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:return 
"races_unknown_origin";
-   case KCSAN_COUNTER_UNENCODABLE_ACCESSES:return 
"unencodable_accesses";
-   case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES:return 
"encoding_false_positives";
-   case KCSAN_COUNTER_COUNT:
-   BUG();
-   }
-   return NULL;
-}
-
 void kcsan_counter_inc(enum kcsan_counter_id id)
 {
	atomic_long_inc(&counters[id]);
@@ -271,8 +265,7 @@ static int show_info(struct seq_file *file, void *v)
/* show stats */
seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
-   seq_printf(file, "%s: %ld\n", counter_to_name(i),
-  atomic_long_read(&counters[i]));
+   seq_printf(file, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
 
/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);


[tip: locking/core] kcsan: Show message if enabled early

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 2778793072c31e3eb33842f3bd7da82dfc7efc6b
Gitweb:
https://git.kernel.org/tip/2778793072c31e3eb33842f3bd7da82dfc7efc6b
Author:Marco Elver 
AuthorDate:Fri, 31 Jul 2020 10:17:22 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:22 -07:00

kcsan: Show message if enabled early

Show a message in the kernel log if KCSAN was enabled early.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 99e5044..b176400 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include 
 #include 
 #include 
@@ -463,7 +465,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 
if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
kcsan_disable_current();
-   pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
+   pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
   is_write ? "write" : "read", size, ptr,
   watchpoint_slot((unsigned long)ptr),
   encode_watchpoint((unsigned long)ptr, size, is_write));
@@ -623,8 +625,10 @@ void __init kcsan_init(void)
 * We are in the init task, and no other tasks should be running;
 * WRITE_ONCE without memory barrier is sufficient.
 */
-   if (kcsan_early_enable)
+   if (kcsan_early_enable) {
+   pr_info("enabled early\n");
WRITE_ONCE(kcsan_enabled, true);
+   }
 }
 
 /* === Exported interface === 
*/


[tip: locking/core] kcsan: Skew delay to be longer for certain access types

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 106a307fd0a762e2d47e1cf99e6da43763887a18
Gitweb:
https://git.kernel.org/tip/106a307fd0a762e2d47e1cf99e6da43763887a18
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:03 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:57 -07:00

kcsan: Skew delay to be longer for certain access types

For compound instrumentation and assert accesses, skew the watchpoint
delay to be longer if randomized. This is useful to improve race
detection for such accesses.

For compound accesses we should increase the delay as we've aggregated
both read and write instrumentation. By giving up 1 call into the
runtime, we're less likely to set up a watchpoint and thus less likely
to detect a race. We can balance this by increasing the watchpoint
delay.

For assert accesses, we know these are of increased interest, and we
wish to increase our chances of detecting races for such checks.

Note that kcsan_udelay_{task,interrupt} define the upper bound delays.
When randomized, delays are uniformly distributed between [0, delay].
Skewing the delay does not break this promise as long as the defined
upper bounds are still adhered to. The current skew results in delays
uniformly distributed between [delay/2, delay].
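
For example (a sketch with a hypothetical upper bound of 80us and
skew_delay_order == 1, i.e. a compound or assert access):

	unsigned int delay = 80;                        /* e.g. kcsan_udelay_task */
	unsigned int sub = prandom_u32_max(delay >> 1); /* uniform in [0, 40) */
	unsigned int eff = delay - sub;                 /* uniform in (40, 80] */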

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 4c8b40b..95a364e 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -283,11 +283,15 @@ static __always_inline bool kcsan_is_enabled(void)
return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
 }
 
-static inline unsigned int get_delay(void)
+static inline unsigned int get_delay(int type)
 {
unsigned int delay = in_task() ? kcsan_udelay_task : 
kcsan_udelay_interrupt;
+   /* For certain access types, skew the random delay to be longer. */
+   unsigned int skew_delay_order =
+   (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
+
return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
-   prandom_u32_max(delay) :
+   prandom_u32_max(delay >> skew_delay_order) :
0);
 }
 
@@ -470,7 +474,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 * Delay this thread, to increase probability of observing a racy
 * conflicting access.
 */
-   udelay(get_delay());
+   udelay(get_delay(type));
 
/*
 * Re-read value, and check if it is as expected; if not, we infer a


[tip: locking/core] kcsan: Add missing CONFIG_KCSAN_IGNORE_ATOMICS checks

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 9d1335cc1e97cc3da0d14f640dd716e614083e8b
Gitweb:
https://git.kernel.org/tip/9d1335cc1e97cc3da0d14f640dd716e614083e8b
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:04 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:57 -07:00

kcsan: Add missing CONFIG_KCSAN_IGNORE_ATOMICS checks

Add missing CONFIG_KCSAN_IGNORE_ATOMICS checks for the builtin atomics
instrumentation.
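
The checks rely on IS_ENABLED(), which folds to a compile-time constant, so
with CONFIG_KCSAN_IGNORE_ATOMICS=y the whole branch is compiled out;
schematically (per the diff below):

	if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);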

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 30 ++
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 95a364e..99e5044 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -914,14 +914,19 @@ EXPORT_SYMBOL(__tsan_init);
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);   
   \
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)
   \
{   
   \
-   check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);   
   \
+   if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { 
   \
+   check_access(ptr, bits / BITS_PER_BYTE, 
KCSAN_ACCESS_ATOMIC);  \
+   }   
   \
return __atomic_load_n(ptr, memorder);  
   \
}   
   \
EXPORT_SYMBOL(__tsan_atomic##bits##_load);  
   \
void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int 
memorder);   \
void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) 
   \
{   
   \
-   check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | 
KCSAN_ACCESS_ATOMIC); \
+   if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { 
   \
+   check_access(ptr, bits / BITS_PER_BYTE, 
   \
+KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); 
   \
+   }   
   \
__atomic_store_n(ptr, v, memorder); 
   \
}   
   \
EXPORT_SYMBOL(__tsan_atomic##bits##_store)
@@ -930,8 +935,11 @@ EXPORT_SYMBOL(__tsan_init);
u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int 
memorder); \
u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int 
memorder)  \
{   
   \
-   check_access(ptr, bits / BITS_PER_BYTE, 
   \
-KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | 
KCSAN_ACCESS_ATOMIC);\
+   if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { 
   \
+   check_access(ptr, bits / BITS_PER_BYTE, 
   \
+KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE 
|  \
+KCSAN_ACCESS_ATOMIC);  
   \
+   }   
   \
return __atomic_##op##suffix(ptr, v, memorder); 
   \
}   
   \
EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
@@ -959,8 +967,11 @@ EXPORT_SYMBOL(__tsan_init);
int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, 
u##bits *exp,  \
  u##bits val, int 
mo, int fail_mo)\
{   
   \
-   check_access(ptr, bits / BITS_PER_BYTE, 
   \
-KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | 
KCSAN_ACCESS_ATOMIC);\
+   if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { 
   \
+

[tip: locking/core] kcsan: Support compounded read-write instrumentation

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 14e2ac8de0f91f12122a49f09897b0cd05256460
Gitweb:
https://git.kernel.org/tip/14e2ac8de0f91f12122a49f09897b0cd05256460
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:01 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:32 -07:00

kcsan: Support compounded read-write instrumentation

Add support for compounded read-write instrumentation if supported by
the compiler. Adds the necessary instrumentation functions, and a new
type which is used to generate a more descriptive report.

Furthermore, such compounded memory access instrumentation is excluded
from the "assume aligned writes up to word size are atomic" rule,
because we cannot assume that the compiler emits code that is atomic for
compound ops.

LLVM/Clang added support for the feature in:
https://github.com/llvm/llvm-project/commit/785d41a261d136b64ab6c15c5d35f2adc5ad53e3

The new instrumentation is emitted for sets of memory accesses in the
same basic block to the same address with at least one read appearing
before a write. These typically result from compound operations such as
++, --, +=, -=, |=, &=, etc. but also equivalent forms such as "var =
var + 1". Where the compiler determines that it is equivalent to emit a
call to a single __tsan_read_write instead of separate __tsan_read and
__tsan_write, we can then benefit from improved performance and better
reporting for such access patterns.
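
For instance (counter_demo is a hypothetical variable; the comment shows the
kind of call the compiler may emit with compound instrumentation enabled):

	static long counter_demo;

	static void bump_demo(void)
	{
		/* Read and write of the same address in one basic block;
		 * instrumented as a single __tsan_read_write8(&counter_demo)
		 * instead of separate read and write calls. */
		counter_demo++;
	}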

The new reports now show that the ops are both reads and writes, for
example:

read-write to 0x90548a38 of 8 bytes by task 143 on cpu 3:
 test_kernel_rmw_array+0x45/0xa0
 access_thread+0x71/0xb0
 kthread+0x21e/0x240
 ret_from_fork+0x22/0x30

read-write to 0x90548a38 of 8 bytes by task 144 on cpu 2:
 test_kernel_rmw_array+0x45/0xa0
 access_thread+0x71/0xb0
 kthread+0x21e/0x240
 ret_from_fork+0x22/0x30

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/kcsan-checks.h | 45 +++
 kernel/kcsan/core.c  | 23 ++
 kernel/kcsan/report.c|  4 +++-
 scripts/Makefile.kcsan   |  2 +-
 4 files changed, 53 insertions(+), 21 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index c5f6c1d..cf14840 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -7,19 +7,13 @@
 #include 
 #include 
 
-/*
- * ACCESS TYPE MODIFIERS
- *
- *   : normal read access;
- *   WRITE : write access;
- *   ATOMIC: access is atomic;
- *   ASSERT: access is not a regular access, but an assertion;
- *   SCOPED: access is a scoped access;
- */
-#define KCSAN_ACCESS_WRITE  0x1
-#define KCSAN_ACCESS_ATOMIC 0x2
-#define KCSAN_ACCESS_ASSERT 0x4
-#define KCSAN_ACCESS_SCOPED 0x8
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND  (1 << 1) /* Compounded read-write 
instrumentation. */
+#define KCSAN_ACCESS_ATOMIC(1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT(1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED(1 << 4) /* Access is a scoped access. */
 
 /*
  * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be 
used
@@ -205,6 +199,15 @@ static inline void __kcsan_disable_current(void) { }
__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 
 /**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size)
\
+   __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | 
KCSAN_ACCESS_WRITE)
+
+/**
  * kcsan_check_read - check regular read access for races
  *
  * @ptr: address of access
@@ -221,18 +224,30 @@ static inline void __kcsan_disable_current(void) { }
 #define kcsan_check_write(ptr, size)   
\
kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size)  
\
+   kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | 
KCSAN_ACCESS_WRITE)
+
 /*
  * Check for atomic accesses: if atomic accesses are not ignored, this simply
  * aliases to kcsan_check_access(), otherwise becomes a no-op.
  */
 #ifdef CONFIG_KCSAN_IGNORE_ATOMICS
-#define kcsan_check_atomic_read(...)   do { } while (0)
-#define kcsan_check_atomic_write(...)  do { } while (0)
+#define kcsan_check_atomic_read(...)   do { } while (0)

[tip: locking/core] kcsan: Test support for compound instrumentation

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: bec4a2474890a6884eb890c778ea02bccaaae6eb
Gitweb:
https://git.kernel.org/tip/bec4a2474890a6884eb890c778ea02bccaaae6eb
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:05 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:58 -07:00

kcsan: Test support for compound instrumentation

Changes the kcsan-test module to support checking reports that include
compound instrumentation. Since we should not fail the test if this
support is unavailable, we have to add a config variable that the test
can use to decide what to check for.

Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/kcsan-test.c | 65 +-
 lib/Kconfig.kcsan |  5 +++-
 2 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
index 721180c..ebe7fd2 100644
--- a/kernel/kcsan/kcsan-test.c
+++ b/kernel/kcsan/kcsan-test.c
@@ -27,6 +27,12 @@
 #include 
 #include 
 
+#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+#else
+#define __KCSAN_ACCESS_RW(alt) (alt)
+#endif
+
 /* Points to current test-case memory access "kernels". */
 static void (*access_kernels[2])(void);
 
@@ -186,20 +192,21 @@ static bool report_matches(const struct expect_report *r)
 
/* Access 1 & 2 */
for (i = 0; i < 2; ++i) {
+   const int ty = r->access[i].type;
const char *const access_type =
-   (r->access[i].type & KCSAN_ACCESS_ASSERT) ?
-   ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
-"assert no accesses" :
-"assert no writes") :
-   ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
-"write" :
-"read");
+   (ty & KCSAN_ACCESS_ASSERT) ?
+ ((ty & KCSAN_ACCESS_WRITE) ?
+  "assert no accesses" :
+  "assert no writes") :
+ ((ty & KCSAN_ACCESS_WRITE) ?
+  ((ty & KCSAN_ACCESS_COMPOUND) ?
+   "read-write" :
+   "write") :
+  "read");
const char *const access_type_aux =
-   (r->access[i].type & KCSAN_ACCESS_ATOMIC) ?
-   " (marked)" :
-   ((r->access[i].type & KCSAN_ACCESS_SCOPED) ?
-" (scoped)" :
-"");
+   (ty & KCSAN_ACCESS_ATOMIC) ?
+ " (marked)" :
+ ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" 
: "");
 
if (i == 1) {
/* Access 2 */
@@ -277,6 +284,12 @@ static noinline void test_kernel_write_atomic(void)
WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
 }
 
+static noinline void test_kernel_atomic_rmw(void)
+{
+   /* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. 
*/
+   __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
+}
+
 __no_kcsan
 static noinline void test_kernel_write_uninstrumented(void) { test_var++; }
 
@@ -439,8 +452,8 @@ static void test_concurrent_races(struct kunit *test)
const struct expect_report expect = {
.access = {
/* NULL will match any address. */
-   { test_kernel_rmw_array, NULL, 0, KCSAN_ACCESS_WRITE },
-   { test_kernel_rmw_array, NULL, 0, 0 },
+   { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
+   { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
},
};
static const struct expect_report never = {
@@ -629,6 +642,29 @@ static void test_read_plain_atomic_write(struct kunit 
*test)
KUNIT_EXPECT_TRUE(test, match_expect);
 }
 
+/* Test that atomic RMWs generate correct report. */
+__no_kcsan
+static void test_read_plain_atomic_rmw(struct kunit *test)
+{
+   const struct expect_report expect = {
+   .access = {
+   { test_kernel_read, &test_var, sizeof(test_var), 0 },
+   { test_kernel_atomic_rmw, &test_var, sizeof(test_var),
+       KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
+   },
+   };
+   bool 

[tip: locking/core] kcsan: Remove debugfs test command

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 4700ccdf18fa3002d66769b69cf715cc58beea37
Gitweb:
https://git.kernel.org/tip/4700ccdf18fa3002d66769b69cf715cc58beea37
Author:Marco Elver 
AuthorDate:Fri, 31 Jul 2020 10:17:21 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:22 -07:00

kcsan: Remove debugfs test command

Remove the debugfs test command, as it is no longer needed now that we
have the KUnit+Torture based kcsan-test module. This is to avoid
confusion around how KCSAN should be tested, as only the kcsan-test
module is maintained.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/debugfs.c | 66 +-
 1 file changed, 66 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 116bdd8..de1da1b 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -98,66 +98,6 @@ static noinline void microbenchmark(unsigned long iters)
current->kcsan_ctx = ctx_save;
 }
 
-/*
- * Simple test to create conflicting accesses. Write 'test=' to KCSAN's
- * debugfs file from multiple tasks to generate real conflicts and show 
reports.
- */
-static long test_dummy;
-static long test_flags;
-static long test_scoped;
-static noinline void test_thread(unsigned long iters)
-{
-   const long CHANGE_BITS = 0xff00ff00ff00ff00L;
-   const struct kcsan_ctx ctx_save = current->kcsan_ctx;
-   cycles_t cycles;
-
-   /* We may have been called from an atomic region; reset context. */
-   memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
-
-   pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
-   pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
-   &test_dummy, &test_flags, &test_scoped);
-
-   cycles = get_cycles();
-   while (iters--) {
-   /* These all should generate reports. */
-   __kcsan_check_read(&test_dummy, sizeof(test_dummy));
-   ASSERT_EXCLUSIVE_WRITER(test_dummy);
-   ASSERT_EXCLUSIVE_ACCESS(test_dummy);
-
-   ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
-   __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
-   ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
-   __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
-   /* not actually instrumented */
-   WRITE_ONCE(test_dummy, iters);  /* to observe value-change */
-   __kcsan_check_write(&test_dummy, sizeof(test_dummy));
-
-   test_flags ^= CHANGE_BITS; /* generate value-change */
-   __kcsan_check_write(&test_flags, sizeof(test_flags));
-
-   BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
-   {
-   /* Should generate reports anywhere in this block. */
-   ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
-   ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
-   BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
-   /* Unrelated accesses. */
-   __kcsan_check_access(&cycles, sizeof(cycles), 0);
-   __kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
-   }
-   BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
-   }
-   cycles = get_cycles() - cycles;
-
-   pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);
-
-   /* restore context */
-   current->kcsan_ctx = ctx_save;
-}
-
 static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
 {
const unsigned long a = *(const unsigned long *)rhs;
@@ -306,12 +246,6 @@ debugfs_write(struct file *file, const char __user *buf, 
size_t count, loff_t *o
	if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
return -EINVAL;
microbenchmark(iters);
-   } else if (str_has_prefix(arg, "test=")) {
-   unsigned long iters;
-
-   if (kstrtoul(&arg[strlen("test=")], 0, &iters))
-   return -EINVAL;
-   test_thread(iters);
} else if (!strcmp(arg, "whitelist")) {
set_report_filterlist_whitelist(true);
} else if (!strcmp(arg, "blacklist")) {


[tip: locking/core] kcsan: Use pr_fmt for consistency

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 178a1877d782c034f466edd80e30a107af5469df
Gitweb:
https://git.kernel.org/tip/178a1877d782c034f466edd80e30a107af5469df
Author:Marco Elver 
AuthorDate:Fri, 31 Jul 2020 10:17:23 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:23 -07:00

kcsan: Use pr_fmt for consistency

Use the same pr_fmt throughout for consistency. [ The only exception is
report.c, where the format must be kept precisely as-is. ]
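
With the prefix defined once at the top of the file, every pr_*() call picks
it up automatically; for example (taken from the diff below):

	#define pr_fmt(fmt) "kcsan: " fmt

	pr_info("%s begin | iters: %lu\n", __func__, iters);
	/* logs: "kcsan: microbenchmark begin | iters: ..." */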

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/debugfs.c  | 8 +---
 kernel/kcsan/selftest.c | 8 +---
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index de1da1b..6c4914f 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include 
 #include 
 #include 
@@ -80,7 +82,7 @@ static noinline void microbenchmark(unsigned long iters)
 */
WRITE_ONCE(kcsan_enabled, false);
 
-   pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+   pr_info("%s begin | iters: %lu\n", __func__, iters);
 
cycles = get_cycles();
while (iters--) {
@@ -91,7 +93,7 @@ static noinline void microbenchmark(unsigned long iters)
}
cycles = get_cycles() - cycles;
 
-   pr_info("KCSAN: %s end   | cycles: %llu\n", __func__, cycles);
+   pr_info("%s end   | cycles: %llu\n", __func__, cycles);
 
WRITE_ONCE(kcsan_enabled, was_enabled);
/* restore context */
@@ -154,7 +156,7 @@ static ssize_t insert_report_filterlist(const char *func)
ssize_t ret = 0;
 
if (!addr) {
-   pr_err("KCSAN: could not find function: '%s'\n", func);
+   pr_err("could not find function: '%s'\n", func);
return -ENOENT;
}
 
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index d26a052..d98bc20 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include 
 #include 
 #include 
@@ -116,16 +118,16 @@ static int __init kcsan_selftest(void)
if (do_test()) \
++passed;  \
else   \
-   pr_err("KCSAN selftest: " #do_test " failed"); \
+   pr_err("selftest: " #do_test " failed");   \
} while (0)
 
RUN_TEST(test_requires);
RUN_TEST(test_encode_decode);
RUN_TEST(test_matching_access);
 
-   pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total);
+   pr_info("selftest: %d/%d tests passed\n", passed, total);
if (passed != total)
-   panic("KCSAN selftests failed");
+   panic("selftests failed");
return 0;
 }
 postcore_initcall(kcsan_selftest);


[tip: locking/core] kcsan: Add support for atomic builtins

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 0f8ad5f2e93425812c393c91ceb5af3d95e79b10
Gitweb:
https://git.kernel.org/tip/0f8ad5f2e93425812c393c91ceb5af3d95e79b10
Author:Marco Elver 
AuthorDate:Fri, 03 Jul 2020 15:40:29 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:05 -07:00

kcsan: Add support for atomic builtins

Some architectures (currently e.g. s390 partially) implement atomics
using the compiler's atomic builtins (__atomic_*, __sync_*). To support
enabling KCSAN on such architectures in the future, or to support experimental
use of these builtins, implement support for them.

We should also avoid breaking KCSAN kernels due to use (accidental or
otherwise) of atomic builtins in drivers, as has happened in the past:
https://lkml.kernel.org/r/5231d2c0-41d9-6721-e15f-a7eedf3ce...@infradead.org

The instrumentation is subtly different from regular reads/writes: TSAN
instrumentation replaces the use of atomic builtins with a call into the
runtime, and the runtime's job is to also execute the desired atomic
operation. We rely on the __atomic_* compiler builtins, available with
all KCSAN-supported compilers, to implement each TSAN atomic
instrumentation function.
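
As a concrete sketch, the load half of DEFINE_TSAN_ATOMIC_LOAD_STORE(32)
below expands to roughly:

	u32 __tsan_atomic32_load(const u32 *ptr, int memorder)
	{
		check_access(ptr, 32 / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);
		return __atomic_load_n(ptr, memorder);
	}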

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 110 +++-
 1 file changed, 110 insertions(+)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 9147ff6..682d9fd 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -879,3 +879,113 @@ void __tsan_init(void)
 {
 }
 EXPORT_SYMBOL(__tsan_init);
+
+/*
+ * Instrumentation for atomic builtins (__atomic_*, __sync_*).
+ *
+ * Normal kernel code _should not_ be using them directly, but some
+ * architectures may implement some or all atomics using the compilers'
+ * builtins.
+ *
+ * Note: If an architecture decides to fully implement atomics using the
+ * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
+ * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
+ * atomic-instrumented) is no longer necessary.
+ *
+ * TSAN instrumentation replaces atomic accesses with calls to any of the below
+ * functions, whose job is to also execute the operation itself.
+ */
+
+#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)
\
+   u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);   
   \
+   u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)
   \
+   {   
   \
+   check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);   
   \
+   return __atomic_load_n(ptr, memorder);  
   \
+   }   
   \
+   EXPORT_SYMBOL(__tsan_atomic##bits##_load);  
   \
+   void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int 
memorder);   \
+   void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) 
   \
+   {   
   \
+   check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | 
KCSAN_ACCESS_ATOMIC); \
+   __atomic_store_n(ptr, v, memorder); 
   \
+   }   
   \
+   EXPORT_SYMBOL(__tsan_atomic##bits##_store)
+
+#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)   
\
+   u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int 
memorder); \
+   u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int 
memorder)  \
+   {   
   \
+   check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_WRITE | 
KCSAN_ACCESS_ATOMIC); \
+   return __atomic_##op##suffix(ptr, v, memorder); 
   \
+   }   
   \
+   EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
+
+/*
+ * Note: CAS operations are always classified as write, even in case they
+ * fail. We cannot perform check_access() after a write, as it might lead to
+ * false positives, in cases such as:
+ *
+ * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
+ *
+ * T1: if (__atomic_load_n(&p->flag, ...)) {
+ * modify *p;
+ * p->flag = 0;
+ * }
+ *
+ * The only downside is that, if 

[tip: locking/core] locking/atomics: Use read-write instrumentation for atomic RMWs

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 3570a1bcf45e9a7ddf9ba0e8d6d57cc67675cfef
Gitweb:
https://git.kernel.org/tip/3570a1bcf45e9a7ddf9ba0e8d6d57cc67675cfef
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:08 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:59 -07:00

locking/atomics: Use read-write instrumentation for atomic RMWs

Use instrument_atomic_read_write() for atomic RMW ops.

Cc: Will Deacon 
Cc: Boqun Feng 
Cc: Arnd Bergmann 
Cc: 
Acked-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/asm-generic/atomic-instrumented.h | 330 ++---
 scripts/atomic/gen-atomic-instrumented.sh |  21 +-
 2 files changed, 180 insertions(+), 171 deletions(-)

diff --git a/include/asm-generic/atomic-instrumented.h 
b/include/asm-generic/atomic-instrumented.h
index 379986e..cd223b6 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -60,7 +60,7 @@ atomic_set_release(atomic_t *v, int i)
 static __always_inline void
 atomic_add(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
 }
 #define atomic_add atomic_add
@@ -69,7 +69,7 @@ atomic_add(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
 }
 #define atomic_add_return atomic_add_return
@@ -79,7 +79,7 @@ atomic_add_return(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_acquire(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
 }
 #define atomic_add_return_acquire atomic_add_return_acquire
@@ -89,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_release(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
 }
 #define atomic_add_return_release atomic_add_return_release
@@ -99,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v)
 static __always_inline int
 atomic_add_return_relaxed(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
 }
 #define atomic_add_return_relaxed atomic_add_return_relaxed
@@ -109,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
 }
 #define atomic_fetch_add atomic_fetch_add
@@ -119,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_acquire(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
 }
 #define atomic_fetch_add_acquire atomic_fetch_add_acquire
@@ -129,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_release(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
 }
 #define atomic_fetch_add_release atomic_fetch_add_release
@@ -139,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
 static __always_inline int
 atomic_fetch_add_relaxed(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
 }
 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
@@ -148,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v)
 static __always_inline void
 atomic_sub(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
 }
 #define atomic_sub atomic_sub
@@ -157,7 +157,7 @@ atomic_sub(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
 }
 #define atomic_sub_return atomic_sub_return
@@ -167,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v)
 static __always_inline int
 atomic_sub_return_acquire(int i, atomic_t *v)
 {
-   instrument_atomic_write(v, sizeof(*v));
+   instrument_atomic_read_write(v, sizeof(*v));
return 

[tip: locking/core] kcsan: Optimize debugfs stats counters

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 2e986b81f698e73c95e6456183f27b861f47bb87
Gitweb:
https://git.kernel.org/tip/2e986b81f698e73c95e6456183f27b861f47bb87
Author:Marco Elver 
AuthorDate:Mon, 10 Aug 2020 10:06:25 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:23 -07:00

kcsan: Optimize debugfs stats counters

Remove kcsan_counter_inc/dec() functions, as they perform no other
logic, and are no longer needed.

This avoids several calls in kcsan_setup_watchpoint() and
kcsan_found_watchpoint(), as well as lets the compiler warn us about
potential out-of-bounds accesses as the array's size is known at all
usage sites at compile-time.
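
For example, with the array's size visible in the declaration (a sketch of
the compile-time benefit; the exact diagnostic depends on the compiler):

	extern atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];

	/* Out-of-bounds index the compiler can now flag (-Warray-bounds): */
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_COUNT]);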

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c| 22 +++---
 kernel/kcsan/debugfs.c | 21 +
 kernel/kcsan/kcsan.h   | 12 ++--
 kernel/kcsan/report.c  |  2 +-
 4 files changed, 23 insertions(+), 34 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index b176400..8a1ff60 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -367,13 +367,13 @@ static noinline void kcsan_found_watchpoint(const 
volatile void *ptr,
 * already removed the watchpoint, or another thread consumed
 * the watchpoint before this thread.
 */
-   kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
}
 
if ((type & KCSAN_ACCESS_ASSERT) != 0)
-   kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
else
-   kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
 
user_access_restore(flags);
 }
@@ -414,7 +414,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
goto out;
 
if (!check_encodable((unsigned long)ptr, size)) {
-   kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
goto out;
}
 
@@ -434,12 +434,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 * with which should_watch() returns true should be tweaked so
 * that this case happens very rarely.
 */
-   kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
goto out_unlock;
}
 
-   kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
-   kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
 
/*
 * Read the current value, to later check and infer a race if the data
@@ -541,16 +541,16 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 * increment this counter.
 */
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
-   kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
kcsan_report(ptr, size, type, value_change, 
KCSAN_REPORT_RACE_SIGNAL,
 watchpoint - watchpoints);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
 
-   kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
if (is_assert)
-   kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+   atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || 
is_assert)
kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
@@ -563,7 +563,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t 
size, int type)
 * reused after this point.
 */
remove_watchpoint(watchpoint);
-   kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
+   atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
 out_unlock:
if (!kcsan_interrupt_watcher)
local_irq_restore(irq_flags);
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 6c4914f..3c8093a 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -17,10 +17,7 @@
 
 #include "kcsan.h"
 
-/*
- * Statistics counters.
- */
-static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+atomic_long_t 

[tip: locking/core] kcsan: Simplify constant string handling

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: a4e74fa5f0d3e2a11f2d0deb522681d219a81426
Gitweb:
https://git.kernel.org/tip/a4e74fa5f0d3e2a11f2d0deb522681d219a81426
Author:Marco Elver 
AuthorDate:Fri, 31 Jul 2020 10:17:20 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:10:22 -07:00

kcsan: Simplify constant string handling

Simplify checking prefixes and length calculation of constant strings.
For the former, the kernel provides str_has_prefix(), and the latter we
should just use strlen("..") because GCC and Clang have optimizations
that optimize these into constants.

No functional change intended.
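
For example, both forms below check the same constant-length prefix, but the
second reads more clearly and still compiles to constant-folded lengths:

	/* before */
	if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1))
		/* ... */;
	/* after -- equivalent, prefix length still a compile-time constant */
	if (str_has_prefix(arg, "microbench="))
		kstrtoul(&arg[strlen("microbench=")], 0, &iters);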

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/debugfs.c | 8 
 kernel/kcsan/report.c  | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 3a9566a..116bdd8 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -300,16 +300,16 @@ debugfs_write(struct file *file, const char __user *buf, 
size_t count, loff_t *o
WRITE_ONCE(kcsan_enabled, true);
} else if (!strcmp(arg, "off")) {
WRITE_ONCE(kcsan_enabled, false);
-   } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
+   } else if (str_has_prefix(arg, "microbench=")) {
unsigned long iters;
 
-   if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
+   if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
return -EINVAL;
microbenchmark(iters);
-   } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
+   } else if (str_has_prefix(arg, "test=")) {
unsigned long iters;
 
-   if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
+   if (kstrtoul(&arg[strlen("test=")], 0, &iters))
return -EINVAL;
test_thread(iters);
} else if (!strcmp(arg, "whitelist")) {
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 3e83a69..bf1d594 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -279,8 +279,8 @@ static int get_stack_skipnr(const unsigned long 
stack_entries[], int num_entries
 
cur = strnstr(buf, "kcsan_", len);
if (cur) {
-   cur += sizeof("kcsan_") - 1;
-   if (strncmp(cur, "test", sizeof("test") - 1))
+   cur += strlen("kcsan_");
+   if (!str_has_prefix(cur, "test"))
continue; /* KCSAN runtime function. */
/* KCSAN related test. */
}


[tip: locking/core] objtool, kcsan: Add __tsan_read_write to uaccess whitelist

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: a81b37590ff2e2507940ec278910b1d315dc73b3
Gitweb:
https://git.kernel.org/tip/a81b37590ff2e2507940ec278910b1d315dc73b3
Author:Marco Elver 
AuthorDate:Fri, 24 Jul 2020 09:00:02 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:32 -07:00

objtool, kcsan: Add __tsan_read_write to uaccess whitelist

Adds the new __tsan_read_write compound instrumentation to objtool's
uaccess whitelist.

Acked-by: Peter Zijlstra (Intel) 
Cc: Josh Poimboeuf 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 tools/objtool/check.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 7546a9d..5eee156 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -528,6 +528,11 @@ static const char *uaccess_safe_builtin[] = {
"__tsan_write4",
"__tsan_write8",
"__tsan_write16",
+   "__tsan_read_write1",
+   "__tsan_read_write2",
+   "__tsan_read_write4",
+   "__tsan_read_write8",
+   "__tsan_read_write16",
"__tsan_atomic8_load",
"__tsan_atomic16_load",
"__tsan_atomic32_load",


[tip: locking/core] kcsan: Add atomic builtin test case

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: f9ea63193135473ed6b6ff06f016eb6248100041
Gitweb:
https://git.kernel.org/tip/f9ea63193135473ed6b6ff06f016eb6248100041
Author:Marco Elver 
AuthorDate:Fri, 03 Jul 2020 15:40:31 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:31 -07:00

kcsan: Add atomic builtin test case

Adds a test case to the kcsan-test module, to verify that atomic
builtin instrumentation works.
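
As background for reading the test below: on failure,
__atomic_compare_exchange_n() stores the current value of *ptr into
*expected and returns false, which is what the test's expectations on
'tmp' rely on. A standalone sketch of that behaviour:

  #include <stdbool.h>
  #include <stdio.h>

  int main(void)
  {
          long var = 20L, expected = 20L;
          bool ok;

          /* Succeeds: var == expected, so var becomes 30; expected is untouched. */
          ok = __atomic_compare_exchange_n(&var, &expected, 30L, false,
                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
          printf("ok=%d var=%ld expected=%ld\n", ok, var, expected); /* 1 30 20 */

          /* Fails: var (30) != expected (20); expected is updated to 30. */
          ok = __atomic_compare_exchange_n(&var, &expected, 40L, false,
                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
          printf("ok=%d var=%ld expected=%ld\n", ok, var, expected); /* 0 30 30 */

          return 0;
  }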

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/kcsan-test.c | 63 ++-
 1 file changed, 63 insertions(+)

diff --git a/kernel/kcsan/kcsan-test.c b/kernel/kcsan/kcsan-test.c
index fed6fcb..721180c 100644
--- a/kernel/kcsan/kcsan-test.c
+++ b/kernel/kcsan/kcsan-test.c
@@ -390,6 +390,15 @@ static noinline void test_kernel_seqlock_writer(void)
write_sequnlock_irqrestore(_seqlock, flags);
 }
 
+static noinline void test_kernel_atomic_builtins(void)
+{
+   /*
+* Generate concurrent accesses, expecting no reports, ensuring KCSAN
+* treats builtin atomics as actually atomic.
+*/
+   __atomic_load_n(&test_var, __ATOMIC_RELAXED);
+}
+
 /* = Test cases = */
 
 /* Simple test with normal data race. */
@@ -853,6 +862,59 @@ static void test_seqlock_noreport(struct kunit *test)
 }
 
 /*
+ * Test atomic builtins work and required instrumentation functions exist. We
+ * also test that KCSAN understands they're atomic by racing with them via
+ * test_kernel_atomic_builtins(), and expect no reports.
+ *
+ * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
+ */
+static void test_atomic_builtins(struct kunit *test)
+{
+   bool match_never = false;
+
+   begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
+   do {
+   long tmp;
+
+   kcsan_enable_current();
+
+   __atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
+   KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));
+
+   KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 20L, test_var);
+
+   tmp = 20L;
+   KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
+   0, __ATOMIC_RELAXED,
+   __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, tmp, 20L);
+   KUNIT_EXPECT_EQ(test, test_var, 30L);
+   KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
+   1, __ATOMIC_RELAXED,
+   __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, tmp, 30L);
+   KUNIT_EXPECT_EQ(test, test_var, 30L);
+
+   KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
+   KUNIT_EXPECT_EQ(test, -2L, test_var);
+
+   __atomic_thread_fence(__ATOMIC_SEQ_CST);
+   __atomic_signal_fence(__ATOMIC_SEQ_CST);
+
+   kcsan_disable_current();
+
+   match_never = report_available();
+   } while (!end_test_checks(match_never));
+   KUNIT_EXPECT_FALSE(test, match_never);
+}
+
+/*
  * Each test case is run with different numbers of threads. Until KUnit supports
  * passing arguments for each test case, we encode #threads in the test case
  * name (read by get_num_threads()). [The '-' was chosen as a stylistic
@@ -891,6 +953,7 @@ static struct kunit_case kcsan_test_cases[] = {
KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
KCSAN_KUNIT_CASE(test_jiffies_noreport),
KCSAN_KUNIT_CASE(test_seqlock_noreport),
+   KCSAN_KUNIT_CASE(test_atomic_builtins),
{},
 };
 


[tip: locking/core] objtool: Add atomic builtin TSAN instrumentation to uaccess whitelist

2020-10-09 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 883957b1c4ac5554ce515e882b7b2b20cbadfdd1
Gitweb:
https://git.kernel.org/tip/883957b1c4ac5554ce515e882b7b2b20cbadfdd1
Author:Marco Elver 
AuthorDate:Fri, 03 Jul 2020 15:40:30 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 24 Aug 2020 15:09:06 -07:00

objtool: Add atomic builtin TSAN instrumentation to uaccess whitelist

Adds the new TSAN functions that may be emitted for atomic builtins to
objtool's uaccess whitelist.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
Cc: Josh Poimboeuf 
Cc: Peter Zijlstra 
---
 tools/objtool/check.c | 50 ++-
 1 file changed, 50 insertions(+)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e034a8f..7546a9d 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -528,6 +528,56 @@ static const char *uaccess_safe_builtin[] = {
"__tsan_write4",
"__tsan_write8",
"__tsan_write16",
+   "__tsan_atomic8_load",
+   "__tsan_atomic16_load",
+   "__tsan_atomic32_load",
+   "__tsan_atomic64_load",
+   "__tsan_atomic8_store",
+   "__tsan_atomic16_store",
+   "__tsan_atomic32_store",
+   "__tsan_atomic64_store",
+   "__tsan_atomic8_exchange",
+   "__tsan_atomic16_exchange",
+   "__tsan_atomic32_exchange",
+   "__tsan_atomic64_exchange",
+   "__tsan_atomic8_fetch_add",
+   "__tsan_atomic16_fetch_add",
+   "__tsan_atomic32_fetch_add",
+   "__tsan_atomic64_fetch_add",
+   "__tsan_atomic8_fetch_sub",
+   "__tsan_atomic16_fetch_sub",
+   "__tsan_atomic32_fetch_sub",
+   "__tsan_atomic64_fetch_sub",
+   "__tsan_atomic8_fetch_and",
+   "__tsan_atomic16_fetch_and",
+   "__tsan_atomic32_fetch_and",
+   "__tsan_atomic64_fetch_and",
+   "__tsan_atomic8_fetch_or",
+   "__tsan_atomic16_fetch_or",
+   "__tsan_atomic32_fetch_or",
+   "__tsan_atomic64_fetch_or",
+   "__tsan_atomic8_fetch_xor",
+   "__tsan_atomic16_fetch_xor",
+   "__tsan_atomic32_fetch_xor",
+   "__tsan_atomic64_fetch_xor",
+   "__tsan_atomic8_fetch_nand",
+   "__tsan_atomic16_fetch_nand",
+   "__tsan_atomic32_fetch_nand",
+   "__tsan_atomic64_fetch_nand",
+   "__tsan_atomic8_compare_exchange_strong",
+   "__tsan_atomic16_compare_exchange_strong",
+   "__tsan_atomic32_compare_exchange_strong",
+   "__tsan_atomic64_compare_exchange_strong",
+   "__tsan_atomic8_compare_exchange_weak",
+   "__tsan_atomic16_compare_exchange_weak",
+   "__tsan_atomic32_compare_exchange_weak",
+   "__tsan_atomic64_compare_exchange_weak",
+   "__tsan_atomic8_compare_exchange_val",
+   "__tsan_atomic16_compare_exchange_val",
+   "__tsan_atomic32_compare_exchange_val",
+   "__tsan_atomic64_compare_exchange_val",
+   "__tsan_atomic_thread_fence",
+   "__tsan_atomic_signal_fence",
/* KCOV */
"write_comp_data",
"check_kcov_mode",


[tip: sched/urgent] sched: Use __always_inline on is_idle_task()

2020-08-27 Thread tip-bot2 for Marco Elver
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID: c94a88f341c9b8f05d8639f62bb5d95936f881cd
Gitweb:
https://git.kernel.org/tip/c94a88f341c9b8f05d8639f62bb5d95936f881cd
Author:Marco Elver 
AuthorDate:Thu, 20 Aug 2020 19:20:46 +02:00
Committer: Peter Zijlstra 
CommitterDate: Wed, 26 Aug 2020 12:41:51 +02:00

sched: Use __always_inline on is_idle_task()

is_idle_task() may be used from noinstr functions such as
irqentry_enter(). Since the compiler is free to not inline regular
inline functions, switch to using __always_inline.

Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20200820172046.ga177...@elver.google.com
---
 include/linux/sched.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 93ecd93..afe01e2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1666,7 +1666,7 @@ extern struct task_struct *idle_task(int cpu);
  *
  * Return: 1 if @p is an idle task. 0 otherwise.
  */
-static inline bool is_idle_task(const struct task_struct *p)
+static __always_inline bool is_idle_task(const struct task_struct *p)
 {
return !!(p->flags & PF_IDLE);
 }


[tip: core/rcu] torture: Pass --kmake-arg to all make invocations

2020-07-31 Thread tip-bot2 for Marco Elver
The following commit has been merged into the core/rcu branch of tip:

Commit-ID: 603d11ad6976e1289f19c2a19e2f75a83d0dc296
Gitweb:
https://git.kernel.org/tip/603d11ad6976e1289f19c2a19e2f75a83d0dc296
Author:Marco Elver 
AuthorDate:Tue, 16 Jun 2020 11:49:24 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 29 Jun 2020 12:01:44 -07:00

torture: Pass --kmake-arg to all make invocations

We need to pass the arguments provided to --kmake-arg to all make
invocations. In particular, the make invocations generating the configs
need to see the final make arguments, e.g. if config variables depend on
particular variables that are passed to make.

For example, when using '--kcsan --kmake-arg CC=clang-11', we would lose
CONFIG_KCSAN=y due to 'make oldconfig' not seeing that we want to use a
compiler that supports KCSAN.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 tools/testing/selftests/rcutorture/bin/configinit.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 93e80a4..d6e5ce0 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -32,11 +32,11 @@ if test -z "$TORTURE_TRUST_MAKE"
 then
make clean > $resdir/Make.clean 2>&1
 fi
-make $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
+make $TORTURE_KMAKE_ARG $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
 mv .config .config.sav
 sh $T/upd.sh < .config.sav > .config
 cp .config .config.new
-yes '' | make oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
+yes '' | make $TORTURE_KMAKE_ARG oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh .config $c


[tip: locking/core] kcsan: Improve IRQ state trace reporting

2020-07-29 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 47490fdd411675707624fdfbf7bcfcd5f6a5e706
Gitweb:
https://git.kernel.org/tip/47490fdd411675707624fdfbf7bcfcd5f6a5e706
Author:Marco Elver 
AuthorDate:Wed, 29 Jul 2020 13:09:16 +02:00
Committer: Ingo Molnar 
CommitterDate: Wed, 29 Jul 2020 16:30:41 +02:00

kcsan: Improve IRQ state trace reporting

To improve the general usefulness of the IRQ state trace events with
KCSAN enabled, save and restore the trace information when entering and
exiting the KCSAN runtime as well as when generating a KCSAN report.

Without this, reporting the IRQ trace events (whether via a KCSAN report
or outside of KCSAN via a lockdep report) is rather useless due to
continuously being touched by KCSAN. This is because if KCSAN is
enabled, every instrumented memory access causes changes to IRQ trace
events (either by KCSAN disabling/enabling interrupts or taking
report_lock when generating a report).

Before "lockdep: Prepare for NMI IRQ state tracking", KCSAN avoided
touching the IRQ trace events via raw_local_irq_save/restore() and
lockdep_off/on().
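
The fix is a plain save/restore bracket around the runtime. A
self-contained analogy of the pattern (all names here are stand-ins,
not kernel symbols):

  #include <stdio.h>

  struct trace_state { unsigned int events; };    /* stand-in for irqtrace state */

  static struct trace_state task_trace;           /* stand-in for current->irqtrace */

  static void noisy_runtime(void)
  {
          task_trace.events++;    /* the runtime would dirty the trace state */
  }

  int main(void)
  {
          struct trace_state snapshot = task_trace;       /* save on entry */

          noisy_runtime();
          task_trace = snapshot;                          /* restore on exit */
          printf("events=%u\n", task_trace.events);       /* unchanged: 0 */
          return 0;
  }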

Fixes: 248591f5d257 ("kcsan: Make KCSAN compatible with new IRQ state tracking")
Signed-off-by: Marco Elver 
Signed-off-by: Ingo Molnar 
Link: https://lore.kernel.org/r/20200729110916.3920464-2-el...@google.com
---
 include/linux/sched.h |  4 
 kernel/kcsan/core.c   | 23 +++
 kernel/kcsan/kcsan.h  |  7 +++
 kernel/kcsan/report.c |  3 +++
 4 files changed, 37 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26adabe..2ede13a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1184,8 +1184,12 @@ struct task_struct {
 #ifdef CONFIG_KASAN
unsigned intkasan_depth;
 #endif
+
 #ifdef CONFIG_KCSAN
struct kcsan_ctxkcsan_ctx;
+#ifdef CONFIG_TRACE_IRQFLAGS
+   struct irqtrace_events  kcsan_save_irqtrace;
+#endif
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 732623c..0fe0681 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -291,6 +291,20 @@ static inline unsigned int get_delay(void)
0);
 }
 
+void kcsan_save_irqtrace(struct task_struct *task)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+   task->kcsan_save_irqtrace = task->irqtrace;
+#endif
+}
+
+void kcsan_restore_irqtrace(struct task_struct *task)
+{
+#ifdef CONFIG_TRACE_IRQFLAGS
+   task->irqtrace = task->kcsan_save_irqtrace;
+#endif
+}
+
 /*
  * Pull everything together: check_access() below contains the performance
  * critical operations; the fast-path (including check_access) functions should
@@ -336,9 +350,11 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
flags = user_access_save();
 
if (consumed) {
+   kcsan_save_irqtrace(current);
kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
 KCSAN_REPORT_CONSUMED_WATCHPOINT,
 watchpoint - watchpoints);
+   kcsan_restore_irqtrace(current);
} else {
/*
 * The other thread may not print any diagnostics, as it has
@@ -396,6 +412,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
goto out;
}
 
+   /*
+* Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
+* runtime is entered for every memory access, and potentially useful
+* information is lost if dirtied by KCSAN.
+*/
+   kcsan_save_irqtrace(current);
if (!kcsan_interrupt_watcher)
local_irq_save(irq_flags);
 
@@ -539,6 +561,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 out_unlock:
if (!kcsan_interrupt_watcher)
local_irq_restore(irq_flags);
+   kcsan_restore_irqtrace(current);
 out:
user_access_restore(ua_flags);
 }
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 763d6d0..2948001 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -9,6 +9,7 @@
 #define _KERNEL_KCSAN_KCSAN_H
 
 #include 
+#include 
 
 /* The number of adjacent watchpoints to check. */
 #define KCSAN_CHECK_ADJACENT 1
@@ -23,6 +24,12 @@ extern unsigned int kcsan_udelay_interrupt;
 extern bool kcsan_enabled;
 
 /*
+ * Save/restore IRQ flags state trace dirtied by KCSAN.
+ */
+void kcsan_save_irqtrace(struct task_struct *task);
+void kcsan_restore_irqtrace(struct task_struct *task);
+
+/*
  * Initialize debugfs file.
  */
 void kcsan_debugfs_init(void);
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 6b2fb1a..9d07e17 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -308,6 +308,9 @@ static void print_verbose_info(struct task_struct *task)
if (!task)
return;
 
+   /* Restore IRQ state trace for printing. */
+   kcsan_restore_irqtrace(task);
+

[tip: locking/core] lockdep: Refactor IRQ trace events fields into struct

2020-07-29 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 9cd8b723f823d007bd70a3252e681fde07143f6d
Gitweb:
https://git.kernel.org/tip/9cd8b723f823d007bd70a3252e681fde07143f6d
Author:Marco Elver 
AuthorDate:Wed, 29 Jul 2020 13:09:15 +02:00
Committer: Ingo Molnar 
CommitterDate: Wed, 29 Jul 2020 16:30:40 +02:00

lockdep: Refactor IRQ trace events fields into struct

Refactor the IRQ trace events fields, used for printing information
about the IRQ trace events, into a separate struct 'irqtrace_events'.

This improves readability by separating the information only used in
reporting, as well as enables (simplified) storing/restoring of
irqtrace_events snapshots.

No functional change intended.

Signed-off-by: Marco Elver 
Signed-off-by: Ingo Molnar 
Link: https://lore.kernel.org/r/20200729110916.3920464-1-el...@google.com
---
 include/linux/irqflags.h | 13 +-
 include/linux/sched.h| 11 +--
 kernel/fork.c| 16 +++
 kernel/locking/lockdep.c | 58 ---
 4 files changed, 50 insertions(+), 48 deletions(-)

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5811ee8..bd5c557 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -33,6 +33,19 @@
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+   unsigned intirq_events;
+   unsigned long   hardirq_enable_ip;
+   unsigned long   hardirq_disable_ip;
+   unsigned inthardirq_enable_event;
+   unsigned inthardirq_disable_event;
+   unsigned long   softirq_disable_ip;
+   unsigned long   softirq_enable_ip;
+   unsigned intsoftirq_disable_event;
+   unsigned intsoftirq_enable_event;
+};
+
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9a9d826..26adabe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -18,6 +18,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -980,17 +981,9 @@ struct task_struct {
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-   unsigned intirq_events;
+   struct irqtrace_events  irqtrace;
unsigned inthardirq_threaded;
-   unsigned long   hardirq_enable_ip;
-   unsigned long   hardirq_disable_ip;
-   unsigned inthardirq_enable_event;
-   unsigned inthardirq_disable_event;
u64 hardirq_chain_key;
-   unsigned long   softirq_disable_ip;
-   unsigned long   softirq_enable_ip;
-   unsigned intsoftirq_disable_event;
-   unsigned intsoftirq_enable_event;
int softirqs_enabled;
int softirq_context;
int irq_config;
diff --git a/kernel/fork.c b/kernel/fork.c
index fc72f09..f831b82 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2035,17 +2035,11 @@ static __latent_entropy struct task_struct *copy_process(
seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
-   p->irq_events = 0;
-   p->hardirq_enable_ip = 0;
-   p->hardirq_enable_event = 0;
-   p->hardirq_disable_ip = _THIS_IP_;
-   p->hardirq_disable_event = 0;
-   p->softirqs_enabled = 1;
-   p->softirq_enable_ip = _THIS_IP_;
-   p->softirq_enable_event = 0;
-   p->softirq_disable_ip = 0;
-   p->softirq_disable_event = 0;
-   p->softirq_context = 0;
+   memset(&p->irqtrace, 0, sizeof(p->irqtrace));
+   p->irqtrace.hardirq_disable_ip  = _THIS_IP_;
+   p->irqtrace.softirq_enable_ip   = _THIS_IP_;
+   p->softirqs_enabled = 1;
+   p->softirq_context  = 0;
 #endif
 
p->pagefault_disabled = 0;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c9ea05e..7b58003 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3484,19 +3484,21 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 void print_irqtrace_events(struct task_struct *curr)
 {
-   printk("irq event stamp: %u\n", curr->irq_events);
+   const struct irqtrace_events *trace = &curr->irqtrace;
+
+   printk("irq event stamp: %u\n", trace->irq_events);
printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
-   curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
-   (void *)curr->hardirq_enable_ip);
+   trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
+   (void *)trace->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
-   

[tip: locking/core] kcsan: Make KCSAN compatible with new IRQ state tracking

2020-07-11 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/core branch of tip:

Commit-ID: 248591f5d257a19c1cba9ab9da3536bfbc2f0149
Gitweb:
https://git.kernel.org/tip/248591f5d257a19c1cba9ab9da3536bfbc2f0149
Author:Marco Elver 
AuthorDate:Wed, 24 Jun 2020 13:32:46 +02:00
Committer: Peter Zijlstra 
CommitterDate: Fri, 10 Jul 2020 12:00:00 +02:00

kcsan: Make KCSAN compatible with new IRQ state tracking

The new IRQ state tracking code does not honor lockdep_off(), and as
such we should again permit tracing by using non-raw functions in
core.c. Update the lockdep_off() comment in report.c to reflect the
fact that there is still a potential risk of deadlock due to using
printk() from scheduler code.

Suggested-by: Peter Zijlstra (Intel) 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Ingo Molnar 
Link: https://lkml.kernel.org/r/20200624113246.ga170...@elver.google.com
---
 kernel/kcsan/core.c   |  5 ++---
 kernel/kcsan/report.c |  9 +
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 15f6794..732623c 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -397,8 +397,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
}
 
if (!kcsan_interrupt_watcher)
-   /* Use raw to avoid lockdep recursion via IRQ flags tracing. */
-   raw_local_irq_save(irq_flags);
+   local_irq_save(irq_flags);
 
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
if (watchpoint == NULL) {
@@ -539,7 +538,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
 out_unlock:
if (!kcsan_interrupt_watcher)
-   raw_local_irq_restore(irq_flags);
+   local_irq_restore(irq_flags);
 out:
user_access_restore(ua_flags);
 }
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index ac5f834..6b2fb1a 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -606,10 +606,11 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
goto out;
 
/*
-* With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
-* we do not turn off lockdep here; this could happen due to recursion
-* into lockdep via KCSAN if we detect a race in utilities used by
-* lockdep.
+* Because we may generate reports when we're in scheduler code, the use
+* of printk() could deadlock. Until such time that all printing code
+* called in print_report() is scheduler-safe, accept the risk, and just
+* get our message out. As such, also disable lockdep to hide the
+* warning, and avoid disabling lockdep for the rest of the kernel.
 */
lockdep_off();
 


[tip: x86/entry] kasan: Fix required compiler version

2020-06-25 Thread tip-bot2 for Marco Elver
The following commit has been merged into the x86/entry branch of tip:

Commit-ID: acf7b0bf7dcf5a96d9b44a0997227c7210d995c1
Gitweb:
https://git.kernel.org/tip/acf7b0bf7dcf5a96d9b44a0997227c7210d995c1
Author:Marco Elver 
AuthorDate:Tue, 23 Jun 2020 13:24:48 +02:00
Committer: Peter Zijlstra 
CommitterDate: Thu, 25 Jun 2020 13:45:39 +02:00

kasan: Fix required compiler version

The first working GCC version to satisfy
CC_HAS_WORKING_NOSANITIZE_ADDRESS is GCC 8.3.0.
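
The new constant follows the kernel's GCC_VERSION encoding, where the
version is a single integer (so the old "GCC_VERSION >= 8" compared an
encoded value like 80300 against a bare 8 and was always true):

  /* Encoding used by the kernel's GCC_VERSION: major * 10000 + minor * 100 + patchlevel. */
  #define GCC_VERSION (__GNUC__ * 10000           \
                       + __GNUC_MINOR__ * 100     \
                       + __GNUC_PATCHLEVEL__)
  /* e.g. GCC 8.3.0 -> 80300, hence the corrected ">= 80300" check. */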

Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124
Reported-by: Stephen Rothwell 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20200623112448.ga208...@elver.google.com
---
 lib/Kconfig.kasan | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index af0dd09..34b84bc 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -16,7 +16,7 @@ config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
 config CC_HAS_WORKING_NOSANITIZE_ADDRESS
-   def_bool !CC_IS_GCC || GCC_VERSION >= 8
+   def_bool !CC_IS_GCC || GCC_VERSION >= 80300
 
 config KASAN
bool "KASAN: runtime memory debugger"


[tip: x86/entry] compiler_attributes.h: Support no_sanitize_undefined check with GCC 4

2020-06-17 Thread tip-bot2 for Marco Elver
The following commit has been merged into the x86/entry branch of tip:

Commit-ID: 33aea07f30c261eff7ba229f19fd1b161e0fb851
Gitweb:
https://git.kernel.org/tip/33aea07f30c261eff7ba229f19fd1b161e0fb851
Author:Marco Elver 
AuthorDate:Tue, 16 Jun 2020 01:15:29 +02:00
Committer: Peter Zijlstra 
CommitterDate: Tue, 16 Jun 2020 15:35:02 +02:00

compiler_attributes.h: Support no_sanitize_undefined check with GCC 4

UBSAN is supported since GCC 4.9, which unfortunately did not yet have
__has_attribute(). To work around this, the __GCC4_has_attribute
workaround requires defining which compiler version supports the given
attribute.

In the case of no_sanitize_undefined, it is the first version that
supports UBSAN, which is GCC 4.9.
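
The workaround emulates __has_attribute() on GCC 4 by token-pasting,
which is why every attribute needs an explicit __GCC4_has_attribute_*
entry; a condensed sketch of the mechanism:

  #ifndef __has_attribute
  # define __has_attribute(x) __GCC4_has_attribute_##x
  # define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
  #endif

  /* __has_attribute(__no_sanitize_undefined__) now expands to the entry above. */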

Reported-by: kernel test robot 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Miguel Ojeda 
Link: https://lkml.kernel.org/r/20200615231529.ga119...@google.com
---
 include/linux/compiler_attributes.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index cdf0165..c8f03d2 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__ 1
 # define __GCC4_has_attribute___nonstring__   0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
 # define __GCC4_has_attribute___fallthrough__ 0
 #endif
 


[tip: x86/entry] compiler_types.h: Add __no_sanitize_{address,undefined} to noinstr

2020-06-15 Thread tip-bot2 for Marco Elver
The following commit has been merged into the x86/entry branch of tip:

Commit-ID: 5144f8a8dfd7b3681f0a2b5bf599a210b2315018
Gitweb:
https://git.kernel.org/tip/5144f8a8dfd7b3681f0a2b5bf599a210b2315018
Author:Marco Elver 
AuthorDate:Thu, 04 Jun 2020 07:58:11 +02:00
Committer: Peter Zijlstra 
CommitterDate: Mon, 15 Jun 2020 14:10:09 +02:00

compiler_types.h: Add __no_sanitize_{address,undefined} to noinstr

Adds portable definitions for __no_sanitize_address and
__no_sanitize_undefined, and subsequently changes noinstr to use these
attributes to disable instrumentation via KASAN or UBSAN.

Reported-by: syzbot+dc1fa714cb070b184...@syzkaller.appspotmail.com
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Acked-by: Miguel Ojeda 
Link: https://lore.kernel.org/lkml/d2474c05a6c93...@google.com/
---
 include/linux/compiler-clang.h | 8 
 include/linux/compiler-gcc.h   | 6 ++
 include/linux/compiler_types.h | 3 ++-
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index ee37256..5e55302 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -33,6 +33,14 @@
 #define __no_sanitize_thread
 #endif
 
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+   __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
 /*
  * Not all versions of clang implement the the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7dd4e03..1c74464 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -150,6 +150,12 @@
 #define __no_sanitize_thread
 #endif
 
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index a8b4266..85b8d23 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -198,7 +198,8 @@ struct ftrace_likely_data {
 
 /* Section for code which can't be instrumented at all */
 #define noinstr \
-   noinline notrace __attribute((__section__(".noinstr.text"))) __no_kcsan
+   noinline notrace __attribute((__section__(".noinstr.text")))\
+   __no_kcsan __no_sanitize_address __no_sanitize_undefined
 
 #endif /* __KERNEL__ */
 


[tip: x86/entry] kasan: Bump required compiler version

2020-06-15 Thread tip-bot2 for Marco Elver
The following commit has been merged into the x86/entry branch of tip:

Commit-ID: 7b861a53e46b6b42ab8560b105af308cb72d7285
Gitweb:
https://git.kernel.org/tip/7b861a53e46b6b42ab8560b105af308cb72d7285
Author:Marco Elver 
AuthorDate:Thu, 04 Jun 2020 07:58:10 +02:00
Committer: Peter Zijlstra 
CommitterDate: Mon, 15 Jun 2020 14:10:09 +02:00

kasan: Bump required compiler version

Adds config variable CC_HAS_WORKING_NOSANITIZE_ADDRESS, which will be
true if we have a compiler that does not fail builds due to
no_sanitize_address functions. This does not yet mean they work as
intended, but for automated build-tests, this is the minimum
requirement.

For example, we require that __always_inline functions used from
no_sanitize_address functions do not generate instrumentation. On GCC <=
7 this fails to build entirely, therefore we make the minimum version
GCC 8.
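
A minimal reproducer of that class of failure, as a sketch (function
names are hypothetical; compile with something like
"gcc -fsanitize=address -c repro.c"):

  #define __always_inline inline __attribute__((__always_inline__))

  static __always_inline int helper(const int *p)
  {
          return *p;      /* must be inlined, uninstrumented, into func() */
  }

  static __attribute__((no_sanitize_address, unused)) int func(const int *p)
  {
          /* Affected GCC versions refuse to inline helper() here and fail the build. */
          return helper(p);
  }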

Suggested-by: Peter Zijlstra 
Signed-off-by: Marco Elver 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Nick Desaulniers 
Acked-by: Andrey Konovalov 
Link: https://lkml.kernel.org/r/20200602175859.gc2...@hirez.programming.kicks-ass.net
---
 lib/Kconfig.kasan | 4 
 1 file changed, 4 insertions(+)

diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 81f5464..af0dd09 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC
 config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+   def_bool !CC_IS_GCC || GCC_VERSION >= 8
+
 config KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
   (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+   depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
help
  Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
  designed to find out-of-bounds accesses and use-after-free bugs.


[tip: locking/kcsan] compiler_types.h: Use unoptimized __unqual_scalar_typeof for sparse

2020-05-28 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: f05e1242fbb2d149ceaa87310cf67d03fe007a25
Gitweb:
https://git.kernel.org/tip/f05e1242fbb2d149ceaa87310cf67d03fe007a25
Author:Marco Elver 
AuthorDate:Thu, 28 May 2020 09:43:13 +02:00
Committer: Borislav Petkov 
CommitterDate: Thu, 28 May 2020 11:51:38 +02:00

compiler_types.h: Use unoptimized __unqual_scalar_typeof for sparse

If the file is being checked with sparse, use the unoptimized version of
__unqual_scalar_typeof(), since sparse does not support _Generic.

Reported-by: kbuild test robot 
Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Link: https://lkml.kernel.org/r/202005280727.lxn1vntw%...@intel.com
---
 include/linux/compiler_types.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index a529fa2..c1ee208 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -246,7 +246,7 @@ struct ftrace_likely_data {
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *non-scalar types unchanged.
  */
-#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
 /*
  * We build this out of a couple of helper macros in a vain attempt to
  * help you keep your lunch down while reading it.


[tip: locking/kcsan] compiler_types.h: Optimize __unqual_scalar_typeof compilation time

2020-05-27 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: a5dead405f6be1fb80555bdcb77c406bf133fdc8
Gitweb:
https://git.kernel.org/tip/a5dead405f6be1fb80555bdcb77c406bf133fdc8
Author:Marco Elver 
AuthorDate:Wed, 27 May 2020 12:32:36 +02:00
Committer: Borislav Petkov 
CommitterDate: Wed, 27 May 2020 14:03:26 +02:00

compiler_types.h: Optimize __unqual_scalar_typeof compilation time

If the compiler supports C11's _Generic, use it to speed up compilation
times of __unqual_scalar_typeof(). GCC version 4.9 or later and
all supported versions of Clang support the feature (the oldest
supported compiler that doesn't support _Generic is GCC 4.8, for which
we use the slower alternative).

The non-_Generic variant relies on multiple expansions of
__pick_integer_type -> __pick_scalar_type -> __builtin_choose_expr,
which increases pre-processed code size, and can cause compile times to
increase in files with numerous expansions of READ_ONCE(), or other
users of __unqual_scalar_typeof().
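
A self-contained illustration of the _Generic approach (trimmed to a
few types; it relies on the controlling expression undergoing lvalue
conversion, which GCC >= 4.9 and Clang implement):

  #include <stdio.h>

  #define unqual_scalar_typeof(x) __typeof__(             \
          _Generic((x),                                   \
                   char: (char)0,                         \
                   signed char: (signed char)0,           \
                   unsigned char: (unsigned char)0,       \
                   int: (int)0,                           \
                   unsigned int: (unsigned int)0,         \
                   long: (long)0,                         \
                   default: (x)))

  int main(void)
  {
          const volatile int cv = 42;
          unqual_scalar_typeof(cv) v = cv;        /* v is a plain, assignable int */

          v++;
          printf("%d\n", v);      /* 43 */
          return 0;
  }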

Summary of compile-time benchmarking done by Arnd Bergmann:

              clang-11   gcc-9
  this patch  0.78       0.91
  ideal       0.76       0.86

See https://lkml.kernel.org/r/CAK8P3a3UYQeXhiufUevz=rwe09wm_vstcd9w+kvjhjcoeqy...@mail.gmail.com

Further compile-testing done with:
  gcc 4.8, 4.9, 5.5, 6.4, 7.5, 8.4;
  clang 9, 10.

Reported-by: Arnd Bergmann 
Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra 
Tested-by: Arnd Bergmann 
Link: https://lkml.kernel.org/r/20200527103236.148700-1-el...@google.com
Link: https://lkml.kernel.org/r/CAK8P3a0RJtbVi1JMsfik=jkhcnfv+djn_fedg-ylw+ueqw3...@mail.gmail.com
---
 include/linux/compiler_types.h | 22 +-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 5faf68e..a529fa2 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -245,7 +245,9 @@ struct ftrace_likely_data {
 /*
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *non-scalar types unchanged.
- *
+ */
+#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900
+/*
  * We build this out of a couple of helper macros in a vain attempt to
  * help you keep your lunch down while reading it.
  */
@@ -267,6 +269,24 @@ struct ftrace_likely_data {
__pick_integer_type(x, int, \
__pick_integer_type(x, long, \
__pick_integer_type(x, long long, x))
+#else
+/*
+ * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type)  \
+   type: (type)0, unsigned type: (unsigned type)0
+
+#define __unqual_scalar_typeof(x) typeof(  \
+   _Generic((x),   \
+__scalar_type_to_expr_cases(char), \
+signed char: (signed char)0,   \
+__scalar_type_to_expr_cases(short),\
+__scalar_type_to_expr_cases(int),  \
+__scalar_type_to_expr_cases(long), \
+__scalar_type_to_expr_cases(long long),\
+default: (x)))
+#endif
 
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \


[tip: locking/kcsan] compiler.h: Avoid nested statement expression in data_race()

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: aa7d8a2ee1e9b80e36ce2aa0d817c14ab3e23157
Gitweb:
https://git.kernel.org/tip/aa7d8a2ee1e9b80e36ce2aa0d817c14ab3e23157
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:45 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:24:21 +02:00

compiler.h: Avoid nested statement expression in data_race()

It appears that compilers have trouble with nested statement
expressions. Therefore, remove one level of statement expression
nesting from the data_race() macro. This will help avoid potential
problems in the future as its usage increases.
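
A toy version of the reworked shape (the enable/disable helpers are
stand-ins for the KCSAN calls): only the initializer of __v is a nested
({ ... }) now, instead of the whole macro body being wrapped in a
second statement expression.

  #include <stdio.h>

  static void checks_disable(void) { }    /* stand-in for __kcsan_disable_current() */
  static void checks_enable(void)  { }    /* stand-in for __kcsan_enable_current() */

  #define guarded_eval(expr) ({                   \
          __typeof__(expr) __v = ({               \
                  checks_disable();               \
                  (expr);                         \
          });                                     \
          checks_enable();                        \
          __v;                                    \
  })

  int main(void)
  {
          int x = 41;

          printf("%d\n", guarded_eval(x + 1));    /* 42 */
          return 0;
  }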

Reported-by: Borislav Petkov 
Reported-by: Nathan Chancellor 
Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Tested-by: Nick Desaulniers 
Link: https://lkml.kernel.org/r/20200520221712.ga21...@zn.tnic
Link: https://lkml.kernel.org/r/20200521142047.169334-10-el...@google.com
---
 include/linux/compiler.h | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 7444f02..379a507 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -211,12 +211,12 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  */
 #define data_race(expr) \
 ({ \
-   __kcsan_disable_current();  \
-   ({  \
-   __unqual_scalar_typeof(({ expr; })) __v = ({ expr; });  \
-   __kcsan_enable_current();   \
-   __v;\
+   __unqual_scalar_typeof(({ expr; })) __v = ({\
+   __kcsan_disable_current();  \
+   expr;   \
}); \
+   __kcsan_enable_current();   \
+   __v;\
 })
 
 /*


[tip: locking/kcsan] compiler.h: Remove data_race() and unnecessary checks from {READ,WRITE}_ONCE()

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 777f73c4e79106d45b304f6af0d31917864dbdf1
Gitweb:
https://git.kernel.org/tip/777f73c4e79106d45b304f6af0d31917864dbdf1
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:44 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:19:53 +02:00

compiler.h: Remove data_race() and unnecessary checks from {READ,WRITE}_ONCE()

The volatile accesses no longer need to be wrapped in data_race()
because compilers that emit instrumentation distinguishing volatile
accesses are required for KCSAN.

Consequently, the explicit kcsan_check_atomic*() are no longer required
either since the compiler emits instrumentation distinguishing the
volatile accesses.

Finally, simplify __READ_ONCE_SCALAR() and remove __WRITE_ONCE_SCALAR().
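
The resulting accessors reduce to a single volatile access; a
simplified sketch of the shape (not the exact kernel macros, which also
strip qualifiers and assert the type is scalar):

  #include <stdio.h>

  #define MY_READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))
  #define MY_WRITE_ONCE(x, val) do { *(volatile __typeof__(x) *)&(x) = (val); } while (0)

  int main(void)
  {
          int flag;

          MY_WRITE_ONCE(flag, 1);
          printf("%d\n", MY_READ_ONCE(flag));     /* 1 */
          return 0;
  }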

 [ bp: Convert commit message to passive voice. ]

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/20200521142047.169334-9-el...@google.com
---
 include/linux/compiler.h | 13 ++---
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 17c98b2..7444f02 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -228,9 +228,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #define __READ_ONCE_SCALAR(x)  \
 ({ \
-   typeof(x) *__xp = &(x); \
-   __unqual_scalar_typeof(x) __x = data_race(__READ_ONCE(*__xp));  \
-   kcsan_check_atomic_read(__xp, sizeof(*__xp));   \
+   __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
smp_read_barrier_depends(); \
(typeof(x))__x; \
 })
@@ -246,17 +244,10 @@ do { \
*(volatile typeof(x) *)&(x) = (val);\
 } while (0)
 
-#define __WRITE_ONCE_SCALAR(x, val)\
-do {   \
-   typeof(x) *__xp = &(x); \
-   kcsan_check_atomic_write(__xp, sizeof(*__xp));  \
-   data_race(({ __WRITE_ONCE(*__xp, val); 0; }));  \
-} while (0)
-
 #define WRITE_ONCE(x, val) \
 do {   \
compiletime_assert_rwonce_type(x);  \
-   __WRITE_ONCE_SCALAR(x, val);\
+   __WRITE_ONCE(x, val);   \
 } while (0)
 
 #ifdef CONFIG_KASAN


[tip: locking/kcsan] compiler_types.h, kasan: Use __SANITIZE_ADDRESS__ instead of CONFIG_KASAN to decide inlining

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: b91caf58f6fb88738f444cf40d247475c367de47
Gitweb:
https://git.kernel.org/tip/b91caf58f6fb88738f444cf40d247475c367de47
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:47 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:31:04 +02:00

compiler_types.h, kasan: Use __SANITIZE_ADDRESS__ instead of CONFIG_KASAN to 
decide inlining

Use __always_inline in compilation units that have instrumentation
disabled (KASAN_SANITIZE_foo.o := n) for KASAN, like it is done for
KCSAN.

Also, add common documentation for KASAN and KCSAN explaining the
attribute.

 [ bp: Massage commit message. ]

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/20200521142047.169334-12-el...@google.com
---
 include/linux/compiler_types.h | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index b190a12..5faf68e 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -167,7 +167,14 @@ struct ftrace_likely_data {
  */
 #define noinline_for_stack noinline
 
-#ifdef CONFIG_KASAN
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
 /*
  * We can't declare function 'inline' because __no_sanitize_address conflicts
  * with inlining. Attempt to inline it may cause a build failure.
@@ -182,10 +189,6 @@ struct ftrace_likely_data {
 
 #define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
-/*
- * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining 
in
- * compilation units where instrumentation is disabled.
- */
 # define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
 # define __no_sanitize_or_inline __no_kcsan_or_inline
 #else


[tip: locking/kcsan] kcsan: Update Documentation to change supported compilers

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 345043266de282a4059bc8336e2bcdd3680cc8f0
Gitweb:
https://git.kernel.org/tip/345043266de282a4059bc8336e2bcdd3680cc8f0
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:43 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:13:45 +02:00

kcsan: Update Documentation to change supported compilers

Document change in required compiler version for KCSAN, and remove the
now redundant note about __no_kcsan and inlining problems with older
compilers.

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/20200521142047.169334-8-el...@google.com
---
 Documentation/dev-tools/kcsan.rst |  9 +
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index f4b5766..ce4bbd9 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -8,8 +8,7 @@ approach to detect races. KCSAN's primary purpose is to detect 
`data races`_.
 Usage
 -
 
-KCSAN is supported in both GCC and Clang. With GCC it requires version 7.3.0 or
-later. With Clang it requires version 7.0.0 or later.
+KCSAN requires Clang version 11 or later.
 
 To enable KCSAN configure the kernel with::
 
@@ -121,12 +120,6 @@ the below options are available:
 static __no_kcsan_or_inline void foo(void) {
 ...
 
-  Note: Older compiler versions (GCC < 9) also do not always honor the
-  ``__no_kcsan`` attribute on regular ``inline`` functions. If false positives
-  with these compilers cannot be tolerated, for small functions where
-  ``__always_inline`` would be appropriate, ``__no_kcsan_or_inline`` should be
-  preferred instead.
-
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
 


[tip: locking/kcsan] kcsan: Restrict supported compilers

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 0d473b1d6e5c240f8ffed02715c718024802d0fa
Gitweb:
https://git.kernel.org/tip/0d473b1d6e5c240f8ffed02715c718024802d0fa
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:42 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 14:46:02 +02:00

kcsan: Restrict supported compilers

The first version of Clang that supports -tsan-distinguish-volatile will
be able to support KCSAN. The first Clang release to do so will be
Clang 11. This is due to satisfying all the following requirements:

1. Never emit calls to __tsan_func_{entry,exit}.

2. __no_kcsan functions should not call anything, not even
   kcsan_{enable,disable}_current(), when using __{READ,WRITE}_ONCE => Requires
   leaving them plain!

3. Support atomic_{read,set}*() with KCSAN, which rely on
   arch_atomic_{read,set}*() using __{READ,WRITE}_ONCE() => Because of
   #2, rely on Clang 11's -tsan-distinguish-volatile support. We will
   double-instrument atomic_{read,set}*(), but that's reasonable given
   it's still lower cost than the data_race() variant due to avoiding 2
   extra calls (kcsan_{en,dis}able_current() calls).

4. __always_inline functions inlined into __no_kcsan functions are never
   instrumented.

5. __always_inline functions inlined into instrumented functions are
   instrumented.

6. __no_kcsan_or_inline functions may be inlined into __no_kcsan functions =>
   Implies leaving 'noinline' off of __no_kcsan_or_inline.

7. Because of #6, __no_kcsan and __no_kcsan_or_inline functions should never be
   spuriously inlined into instrumented functions, causing the accesses of the
   __no_kcsan function to be instrumented.

Older versions of Clang do not satisfy #3. The latest GCC currently
doesn't support at least #1, #3, and #7.

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/CANpmjNMTsY_8241bS7=xafqvzhflrvekv_um4aduwe_kh3r...@mail.gmail.com
Link: https://lkml.kernel.org/r/20200521142047.169334-7-el...@google.com
---
 lib/Kconfig.kcsan |  9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index b5d88ac..5ee88e5 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -3,6 +3,12 @@
 config HAVE_ARCH_KCSAN
bool
 
+config HAVE_KCSAN_COMPILER
+   def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+   help
+ For the list of compilers that support KCSAN, please see
+ .
+
 config KCSAN_KCOV_BROKEN
def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
depends on CC_IS_CLANG
@@ -15,7 +21,8 @@ config KCSAN_KCOV_BROKEN
 
 menuconfig KCSAN
bool "KCSAN: dynamic data race detector"
-   depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN
+   depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
+   depends on DEBUG_KERNEL && !KASAN
depends on !KCSAN_KCOV_BROKEN
select STACKTRACE
help


[tip: locking/kcsan] compiler.h: Move function attributes to compiler_types.h

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 9a4e6db6161cc3b31c6202f8d7a9495e0c2ecda7
Gitweb:
https://git.kernel.org/tip/9a4e6db6161cc3b31c6202f8d7a9495e0c2ecda7
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:46 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:27:32 +02:00

compiler.h: Move function attributes to compiler_types.h

Cleanup and move the KASAN and KCSAN related function attributes to
compiler_types.h, where the rest of the same kind live.

No functional change intended.

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/20200521142047.169334-11-el...@google.com
---
 include/linux/compiler.h   | 29 -
 include/linux/compiler_types.h | 29 +
 2 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 379a507..652aee0 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,35 +250,6 @@ do { \
__WRITE_ONCE(x, val);   \
 } while (0)
 
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address conflicts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kasan_or_inline
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-#define __no_kcsan __no_sanitize_thread
-#ifdef __SANITIZE_THREAD__
-/*
- * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
- * compilation units where instrumentation is disabled.
- */
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
-#endif
-
-#ifndef __no_sanitize_or_inline
-#define __no_sanitize_or_inline __always_inline
-#endif
-
 static __no_sanitize_or_inline
 unsigned long __read_once_word_nocheck(const void *addr)
 {
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6ed0612..b190a12 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -167,6 +167,35 @@ struct ftrace_likely_data {
  */
 #define noinline_for_stack noinline
 
+#ifdef CONFIG_KASAN
+/*
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+/*
+ * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
+ * compilation units where instrumentation is disabled.
+ */
+# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */


[tip: locking/kcsan] kcsan: Remove 'noinline' from __no_kcsan_or_inline

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: f487a549ea30ee894055d8d20e81c1996a6e10a0
Gitweb:
https://git.kernel.org/tip/f487a549ea30ee894055d8d20e81c1996a6e10a0
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:41 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 15:12:39 +02:00

kcsan: Remove 'noinline' from __no_kcsan_or_inline

Some compilers incorrectly inline small __no_kcsan functions, which then
results in instrumenting the accesses. For this reason, the 'noinline'
attribute was added to __no_kcsan_or_inline. All known versions of GCC
are affected by this. Supported versions of Clang are unaffected, and
never inline a no_sanitize function.

However, the attribute 'noinline' in __no_kcsan_or_inline causes
unexpected code generation in functions that are __no_kcsan and call a
__no_kcsan_or_inline function.

In certain situations it is expected that the __no_kcsan_or_inline
function is actually inlined by the __no_kcsan function, and *no* calls
are emitted. By removing the 'noinline' attribute, give the compiler
the ability to inline and generate the expected code in __no_kcsan
functions.
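
The expectation, sketched with Clang-style attributes (the defines are
stand-ins for the kernel macros, not the real definitions):

  #include <stdio.h>

  #define __no_kcsan __attribute__((no_sanitize("thread")))
  /* Note: no 'noinline' here, so the compiler is free to inline. */
  #define __no_kcsan_or_inline __no_kcsan

  static __no_kcsan_or_inline unsigned long read_word(const void *p)
  {
          return *(const unsigned long *)p;
  }

  static __no_kcsan unsigned long caller(const void *p)
  {
          /* Expected codegen: read_word() is inlined, no call is emitted. */
          return read_word(p);
  }

  int main(void)
  {
          unsigned long v = 42;

          printf("%lu\n", caller(&v));
          return 0;
  }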

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/canpmjnnopjk0tprxkb_deinav_ummorf1-2uajlhnlwqq1h...@mail.gmail.com
Link: https://lkml.kernel.org/r/20200521142047.169334-6-el...@google.com
---
 include/linux/compiler.h | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e24cc3a..17c98b2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -276,11 +276,9 @@ do { \
 #ifdef __SANITIZE_THREAD__
 /*
  * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
- * compilation units where instrumentation is disabled. The attribute 'noinline'
- * is required for older compilers, where implicit inlining of very small
- * functions renders __no_sanitize_thread ineffective.
+ * compilation units where instrumentation is disabled.
  */
-# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
+# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
 # define __no_sanitize_or_inline __no_kcsan_or_inline
 #else
 # define __no_kcsan_or_inline __always_inline


[tip: locking/kcsan] kcsan: Avoid inserting __tsan_func_entry/exit if possible

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 3bc9e5b0725b353b921feaf2c10bb4a9f932646f
Gitweb:
https://git.kernel.org/tip/3bc9e5b0725b353b921feaf2c10bb4a9f932646f
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:38 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 14:36:19 +02:00

kcsan: Avoid inserting __tsan_func_entry/exit if possible

To avoid inserting __tsan_func_{entry,exit}, add the option if
supported by the compiler. Currently only Clang can be told to not emit
calls to these functions. It is safe to not emit these, since KCSAN
does not rely on them.

Note that, if we disable __tsan_func_{entry,exit}(), we need to disable
tail-call optimization in sanitized compilation units, as otherwise we
may skip frames in the stack trace; in particular when the tail called
function is one of the KCSAN's runtime functions, and a report is
generated, we might miss the function where the actual access occurred.

Since __tsan_func_{entry,exit}() insertion effectively disabled
tail-call optimization, there should be no observable change.

This was caught and confirmed with kcsan-test & UNWINDER_ORC.
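
Why tail calls matter for stack traces, as a sketch (hypothetical
functions; compare the codegen with and without
-foptimize-sibling-calls):

  #include <stdio.h>

  static void report(void)
  {
          /* With tail-call optimization, a trace taken here can show report()
           * called directly from main(), with do_access() missing entirely. */
          printf("report\n");
  }

  static void do_access(void)
  {
          report();       /* tail position: do_access()'s frame may be reused */
  }

  int main(void)
  {
          do_access();
          return 0;
  }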

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Will Deacon 
Acked-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20200521142047.169334-3-el...@google.com
---
 scripts/Makefile.kcsan | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
index caf..20337a7 100644
--- a/scripts/Makefile.kcsan
+++ b/scripts/Makefile.kcsan
@@ -1,6 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0
 ifdef CONFIG_KCSAN
 
-CFLAGS_KCSAN := -fsanitize=thread
+# GCC and Clang accept backend options differently. Do not wrap in cc-option,
+# because Clang accepts "--param" even if it is unused.
+ifdef CONFIG_CC_IS_CLANG
+cc-param = -mllvm -$(1)
+else
+cc-param = --param -$(1)
+endif
+
+CFLAGS_KCSAN := -fsanitize=thread \
+   $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls)
 
 endif # CONFIG_KCSAN


[tip: locking/kcsan] kcsan: Pass option tsan-instrument-read-before-write to Clang

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 52dfbb97a90fbf6a9826f15a71fca37861330a13
Gitweb:
https://git.kernel.org/tip/52dfbb97a90fbf6a9826f15a71fca37861330a13
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:40 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 14:57:39 +02:00

kcsan: Pass option tsan-instrument-read-before-write to Clang

Clang (unlike GCC) removes reads before writes with matching addresses
in the same basic block. This is an optimization for TSAN, since a
write will always cause a conflict if the preceding read would have.

However, for KCSAN we cannot rely on this option, because we apply
several special rules to writes, in particular when the
KCSAN_ASSUME_PLAIN_WRITES_ATOMIC option is selected. To avoid missing
potential data races, pass the -tsan-instrument-read-before-write option
to Clang if it is available [1].

[1] https://github.com/llvm/llvm-project/commit/151ed6aa38a3ec6c01973b35f684586b6e1c0f7e
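
The pattern in question, sketched: a read immediately followed by a
write to the same address. TSAN can skip instrumenting such a read,
since the write conflicts whenever the read would; KCSAN, however, may
treat the plain write as atomic (e.g. under
KCSAN_ASSUME_PLAIN_WRITES_ATOMIC), so eliding the read could hide a
real race.

  void increment(int *counter)
  {
          int old = *counter;     /* Clang may drop instrumentation for this read... */

          *counter = old + 1;     /* ...because this write to the same address follows. */
  }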

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Link: https://lkml.kernel.org/r/20200521142047.169334-5-el...@google.com
---
 scripts/Makefile.kcsan | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
index 75d2942..bd4da1a 100644
--- a/scripts/Makefile.kcsan
+++ b/scripts/Makefile.kcsan
@@ -13,6 +13,7 @@ endif
 # of some options does not break KCSAN nor causes false positive reports.
 CFLAGS_KCSAN := -fsanitize=thread \
$(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
+   $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \
$(call cc-param,tsan-distinguish-volatile=1)
 
 endif # CONFIG_KCSAN


[tip: locking/kcsan] kcsan: Support distinguishing volatile accesses

2020-05-22 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 4e23395b9e97562d12b87a330a2fca3bf10c8663
Gitweb:
https://git.kernel.org/tip/4e23395b9e97562d12b87a330a2fca3bf10c8663
Author:Marco Elver 
AuthorDate:Thu, 21 May 2020 16:20:39 +02:00
Committer: Borislav Petkov 
CommitterDate: Fri, 22 May 2020 14:46:02 +02:00

kcsan: Support distinguishing volatile accesses

In the kernel, the "volatile" keyword is used in various concurrent
contexts, whether in low-level synchronization primitives or for
legacy reasons. If supported by the compiler, it will be assumed
that aligned volatile accesses up to sizeof(long long) (matching
compiletime_assert_rwonce_type()) are atomic.

Recent versions of Clang [1] (GCC tentative [2]) can instrument
volatile accesses differently. Add the option (required) to enable the
instrumentation, and provide the necessary runtime functions. None of
the updated compilers are widely available yet (Clang 11 will be the
first release to support the feature).

[1] 
https://github.com/llvm/llvm-project/commit/5a2c31116f412c3b6888be361137efd705e05814
[2] https://gcc.gnu.org/pipermail/gcc-patches/2020-April/544452.html

This change allows removing any explicit checks in primitives such as
READ_ONCE() and WRITE_ONCE().

 [ bp: Massage commit message a bit. ]

Signed-off-by: Marco Elver 
Signed-off-by: Borislav Petkov 
Acked-by: Will Deacon 
Acked-by: Peter Zijlstra (Intel) 
Link: https://lkml.kernel.org/r/20200521142047.169334-4-el...@google.com
---
 kernel/kcsan/core.c| 43 +-
 scripts/Makefile.kcsan |  5 -
 2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index a73a66c..15f6794 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -790,6 +790,49 @@ void __tsan_write_range(void *ptr, size_t size)
 EXPORT_SYMBOL(__tsan_write_range);
 
 /*
+ * Use of explicit volatile is generally disallowed [1], however, volatile is
+ * still used in various concurrent contexts, whether in low-level
+ * synchronization primitives or for legacy reasons.
+ * [1] https://lwn.net/Articles/233479/
+ *
+ * We only consider volatile accesses atomic if they are aligned and would pass
+ * the size-check of compiletime_assert_rwonce_type().
+ */
+#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                 \
+   void __tsan_volatile_read##size(void *ptr) \
+   {  \
+   const bool is_atomic = size <= sizeof(long long) &&\
+  IS_ALIGNED((unsigned long)ptr, size);   \
+   if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)  \
+   return;\
+   check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
+   }  \
+   EXPORT_SYMBOL(__tsan_volatile_read##size); \
+   void __tsan_unaligned_volatile_read##size(void *ptr)   \
+   __alias(__tsan_volatile_read##size);   \
+   EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);   \
+   void __tsan_volatile_write##size(void *ptr)\
+   {  \
+   const bool is_atomic = size <= sizeof(long long) &&\
+  IS_ALIGNED((unsigned long)ptr, size);   \
+   if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)  \
+   return;\
+   check_access(ptr, size,\
+KCSAN_ACCESS_WRITE |  \
+(is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
+   }  \
+   EXPORT_SYMBOL(__tsan_volatile_write##size);\
+   void __tsan_unaligned_volatile_write##size(void *ptr)  \
+   __alias(__tsan_volatile_write##size);  \
+   EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
+
+DEFINE_TSAN_VOLATILE_READ_WRITE(1);
+DEFINE_TSAN_VOLATILE_READ_WRITE(2);
+DEFINE_TSAN_VOLATILE_READ_WRITE(4);
+DEFINE_TSAN_VOLATILE_READ_WRITE(8);
+DEFINE_TSAN_VOLATILE_READ_WRITE(16);
+
+/*
  * The below are not required by KCSAN, but can still be emitted by the
  * compiler.
  */
diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
index 20337a7..75d2942 100644
--- a/scripts/Makefile.kcsan
+++ b/scripts/Makefile.kcsan
@@ -9,7 +9,10 @@ else
 cc-param = --param -$(1)
 endif
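
A rough sketch of the rule encoded by DEFINE_TSAN_VOLATILE_READ_WRITE()
above (the variable name is illustrative):

    volatile int ready;   /* aligned, sizeof(int) <= sizeof(long long) */

    void wait_for_ready(void)
    {
            /*
             * With -tsan-distinguish-volatile=1, this read is
             * instrumented via __tsan_volatile_read4() and checked as
             * atomic (KCSAN_ACCESS_ATOMIC), analogous to READ_ONCE().
             * An unaligned volatile access, or one larger than
             * sizeof(long long), is checked as a plain access instead.
             */
            while (!ready)
                    cpu_relax();
    }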
 

[tip: locking/kcsan] objtool, kcsan: Add explicit check functions to uaccess whitelist

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 9967683ce5d4ce21829bbc807e006ee33cc68725
Gitweb:
https://git.kernel.org/tip/9967683ce5d4ce21829bbc807e006ee33cc68725
Author:Marco Elver 
AuthorDate:Wed, 25 Mar 2020 17:41:57 +01:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:12 -07:00

objtool, kcsan: Add explicit check functions to uaccess whitelist

Add explicitly invoked KCSAN check functions to objtool's uaccess
whitelist. This is needed in order to permit calling into
kcsan_check_scoped_accesses() from the fast-path, which in turn calls
__kcsan_check_access().  __kcsan_check_access() is the generic variant
of the already whitelisted specializations __tsan_{read,write}N.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 tools/objtool/check.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b6da413..b6a573d 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -468,8 +468,10 @@ static const char *uaccess_safe_builtin[] = {
"__asan_report_store8_noabort",
"__asan_report_store16_noabort",
/* KCSAN */
+   "__kcsan_check_access",
"kcsan_found_watchpoint",
"kcsan_setup_watchpoint",
+   "kcsan_check_scoped_accesses",
/* KCSAN/TSAN */
"__tsan_func_entry",
"__tsan_func_exit",


[tip: locking/kcsan] kcsan: Move kcsan_{disable,enable}_current() to kcsan-checks.h

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 01b4ff58f72dbee926077d9afa0650f6e685e866
Gitweb:
https://git.kernel.org/tip/01b4ff58f72dbee926077d9afa0650f6e685e866
Author:Marco Elver 
AuthorDate:Tue, 31 Mar 2020 21:32:32 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:14 -07:00

kcsan: Move kcsan_{disable,enable}_current() to kcsan-checks.h

Both affect access checks, and should therefore be in kcsan-checks.h.
This is in preparation for using these in compiler.h.

Acked-by: Will Deacon 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/kcsan-checks.h | 16 
 include/linux/kcsan.h| 16 
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 101df7f..ef95ddc 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -37,6 +37,20 @@
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
 
 /**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+
+/**
  * kcsan_nestable_atomic_begin - begin nestable atomic region
  *
  * Accesses within the atomic region may appear to race with other accesses but
@@ -133,6 +147,8 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
 
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void)  { }
 static inline void kcsan_nestable_atomic_begin(void)   { }
 static inline void kcsan_nestable_atomic_end(void) { }
 static inline void kcsan_flat_atomic_begin(void)   { }
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index 17ae59e..53340d8 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -50,25 +50,9 @@ struct kcsan_ctx {
  */
 void kcsan_init(void);
 
-/**
- * kcsan_disable_current - disable KCSAN for the current context
- *
- * Supports nesting.
- */
-void kcsan_disable_current(void);
-
-/**
- * kcsan_enable_current - re-enable KCSAN for the current context
- *
- * Supports nesting.
- */
-void kcsan_enable_current(void);
-
 #else /* CONFIG_KCSAN */
 
 static inline void kcsan_init(void){ }
-static inline void kcsan_disable_current(void) { }
-static inline void kcsan_enable_current(void)  { }
 
 #endif /* CONFIG_KCSAN */
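
For reference, a minimal usage sketch of the pair being moved (the
helper function is hypothetical):

    static inline int peek_value(const int *p)
    {
            int val;

            kcsan_disable_current();   /* supports nesting */
            val = *p;                  /* races here are not reported */
            kcsan_enable_current();
            return val;
    }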
 


[tip: locking/kcsan] checkpatch: Warn about data_race() without comment

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 5099a722e9727fe9a93fac51e961735f40e5b6c8
Gitweb:
https://git.kernel.org/tip/5099a722e9727fe9a93fac51e961735f40e5b6c8
Author:Marco Elver 
AuthorDate:Wed, 01 Apr 2020 12:17:14 +02:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 06 May 2020 10:54:58 -07:00

checkpatch: Warn about data_race() without comment

Warn about applications of data_race() without a comment, to encourage
documenting the reasoning behind why it was deemed safe.

Suggested-by: Will Deacon 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 scripts/checkpatch.pl | 8 
 1 file changed, 8 insertions(+)

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a63380c..48bb950 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5833,6 +5833,14 @@ sub process {
}
}
 
+# check for data_race without a comment.
+   if ($line =~ /\bdata_race\s*\(/) {
+   if (!ctx_has_comment($first_line, $linenr)) {
+   WARN("DATA_RACE",
+"data_race without comment\n" . $herecurr);
+   }
+   }
+
 # check for smp_read_barrier_depends and read_barrier_depends
if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) {
WARN("READ_BARRIER_DEPENDS",


[tip: locking/kcsan] kcsan: Change data_race() to no longer require marking racing accesses

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: d071e91361bbfef524ae8abf7e560fb294d0ad64
Gitweb:
https://git.kernel.org/tip/d071e91361bbfef524ae8abf7e560fb294d0ad64
Author:Marco Elver 
AuthorDate:Tue, 31 Mar 2020 21:32:33 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:15 -07:00

kcsan: Change data_race() to no longer require marking racing accesses

Thus far, accesses marked with data_race() would still require the
racing access to be marked in some way (be it with READ_ONCE(),
WRITE_ONCE(), or data_race() itself), as otherwise KCSAN would still
report a data race.  This requirement, however, seems to be unintuitive,
and some valid use-cases demand *not* marking other accesses, as marking
them might hide more serious bugs (e.g. diagnostic reads).

Therefore, this commit changes data_race() to no longer require marking
racing accesses (although it's still recommended if possible).

The alternative would have been introducing another variant of
data_race(), however, since usage of data_race() already needs to be
carefully reasoned about, distinguishing between these cases likely adds
more complexity in the wrong place.

Link: https://lkml.kernel.org/r/20200331131002.GA30975@willie-the-truck
Cc: Paul E. McKenney 
Cc: Will Deacon 
Cc: Qian Cai 
Acked-by: Will Deacon 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/compiler.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f504ede..1729bd1 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -326,9 +326,9 @@ unsigned long read_word_at_a_time(const void *addr)
 #define data_race(expr)                                                        \
({ \
typeof(({ expr; })) __val; \
-   kcsan_nestable_atomic_begin(); \
+   kcsan_disable_current();   \
__val = ({ expr; });   \
-   kcsan_nestable_atomic_end();   \
+   kcsan_enable_current();\
__val; \
})
 #else
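
The effect of the change, as a hypothetical snippet: a diagnostic read
wrapped in data_race() no longer reports the unmarked plain write it
races with:

    /* Writer, intentionally left unmarked: */
    obj->state = new_state;

    /* Reader in a debug path elsewhere: */
    /* Data race tolerated: diagnostic snapshot only. */
    seq_printf(m, "state=%d\n", data_race(obj->state));

Since data_race() now uses kcsan_disable_current() rather than the
nestable-atomic markers, the plain write above is still checked against
all other unmarked accesses.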


[tip: locking/kcsan] kcsan: Fix function matching in report

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: f770ed10a9ee65529f3ec8d90eb374bbd8b7c238
Gitweb:
https://git.kernel.org/tip/f770ed10a9ee65529f3ec8d90eb374bbd8b7c238
Author:Marco Elver 
AuthorDate:Fri, 10 Apr 2020 18:44:17 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:15 -07:00

kcsan: Fix function matching in report

Pass string length as returned by scnprintf() to strnstr(), since
strnstr() searches exactly len bytes in haystack, even if it contains a
NUL-terminator before haystack+len.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/report.c | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index ddc18f1..cf41d63 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -192,11 +192,11 @@ skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
 * maintainers.
 */
char buf[64];
+   int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);
 
-   snprintf(buf, sizeof(buf), "%ps", (void *)top_frame);
-   if (!strnstr(buf, "rcu_", sizeof(buf)) &&
-   !strnstr(buf, "_rcu", sizeof(buf)) &&
-   !strnstr(buf, "_srcu", sizeof(buf)))
+   if (!strnstr(buf, "rcu_", len) &&
+   !strnstr(buf, "_rcu", len) &&
+   !strnstr(buf, "_srcu", len))
return true;
}
 
@@ -262,15 +262,15 @@ static const char *get_thread_desc(int task_id)
 static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
 {
char buf[64];
+   int len;
int skip = 0;
 
for (; skip < num_entries; ++skip) {
-   snprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);
-   if (!strnstr(buf, "csan_", sizeof(buf)) &&
-   !strnstr(buf, "tsan_", sizeof(buf)) &&
-   !strnstr(buf, "_once_size", sizeof(buf))) {
+   len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);
+   if (!strnstr(buf, "csan_", len) &&
+   !strnstr(buf, "tsan_", len) &&
+   !strnstr(buf, "_once_size", len))
break;
-   }
}
return skip;
 }
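
The subtlety in isolation (buffer contents are hypothetical):
strnstr() compares up to len bytes of the haystack without stopping at
a NUL terminator, so passing sizeof(buf) instead of the formatted
length can match stale bytes:

    char buf[64];
    int len = scnprintf(buf, sizeof(buf), "%ps", (void *)frame);

    /* Correct: search only what scnprintf() actually produced. */
    if (strnstr(buf, "_rcu", len))
            ...

    /* Fragile: may scan leftover bytes beyond the terminating NUL. */
    if (strnstr(buf, "_rcu", sizeof(buf)))
            ...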


[tip: locking/kcsan] kcsan: Add __kcsan_{enable,disable}_current() variants

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 19acd03d95dad1f50d06f28179a1866fca431fed
Gitweb:
https://git.kernel.org/tip/19acd03d95dad1f50d06f28179a1866fca431fed
Author:Marco Elver 
AuthorDate:Fri, 24 Apr 2020 17:47:29 +02:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 06 May 2020 10:58:46 -07:00

kcsan: Add __kcsan_{enable,disable}_current() variants

The __kcsan_{enable,disable}_current() variants only call into KCSAN if
KCSAN is enabled for the current compilation unit. Note: This is
typically not what we want, as we usually want to ensure that even calls
into other functions still have KCSAN disabled.

These variants may safely be used in header files that are shared
between regular kernel code and code that does not link the KCSAN
runtime.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/kcsan-checks.h | 17 ++---
 kernel/kcsan/core.c  |  7 +++
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index ef95ddc..7b0b9c4 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -49,6 +49,7 @@ void kcsan_disable_current(void);
  * Supports nesting.
  */
 void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
 
 /**
  * kcsan_nestable_atomic_begin - begin nestable atomic region
@@ -149,6 +150,7 @@ static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
 
 static inline void kcsan_disable_current(void) { }
 static inline void kcsan_enable_current(void)  { }
+static inline void kcsan_enable_current_nowarn(void)   { }
 static inline void kcsan_nestable_atomic_begin(void)   { }
 static inline void kcsan_nestable_atomic_end(void) { }
 static inline void kcsan_flat_atomic_begin(void)   { }
@@ -165,15 +167,24 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
 
 #endif /* CONFIG_KCSAN */
 
+#ifdef __SANITIZE_THREAD__
 /*
- * kcsan_*: Only calls into the runtime when the particular compilation unit has
- * KCSAN instrumentation enabled. May be used in header files.
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
  */
-#ifdef __SANITIZE_THREAD__
 #define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
 #else
 static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  int type) { }
+static inline void __kcsan_enable_current(void)  { }
+static inline void __kcsan_disable_current(void) { }
 #endif
 
 /**
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index a572aae..a73a66c 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -625,6 +625,13 @@ void kcsan_enable_current(void)
 }
 EXPORT_SYMBOL(kcsan_enable_current);
 
+void kcsan_enable_current_nowarn(void)
+{
+   if (get_ctx()->disable_count-- == 0)
+   kcsan_disable_current();
+}
+EXPORT_SYMBOL(kcsan_enable_current_nowarn);
+
 void kcsan_nestable_atomic_begin(void)
 {
/*
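
A minimal sketch of the distinction (the function name is
hypothetical):

    /* In a header shared with code that does not link the runtime: */
    static inline void reset_flag(int *p)
    {
            /*
             * These are no-ops unless this compilation unit is built
             * with KCSAN instrumentation (__SANITIZE_THREAD__). Unlike
             * kcsan_disable_current(), accesses performed in functions
             * called from here remain checked.
             */
            __kcsan_disable_current();
            *p = 0;
            __kcsan_enable_current();
    }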


[tip: locking/kcsan] kcsan: Update Documentation/dev-tools/kcsan.rst

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: e7325b774cc72edc2cffc4a1ce40f4dbf1bc0930
Gitweb:
https://git.kernel.org/tip/e7325b774cc72edc2cffc4a1ce40f4dbf1bc0930
Author:Marco Elver 
AuthorDate:Thu, 05 Mar 2020 15:21:08 +01:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 25 Mar 2020 09:56:00 -07:00

kcsan: Update Documentation/dev-tools/kcsan.rst

Extend and improve based on recent changes, and summarize important
bits that have been missing. Tested with "make htmldocs".

Signed-off-by: Marco Elver 
Cc: Qian Cai 
Signed-off-by: Paul E. McKenney 
---
 Documentation/dev-tools/kcsan.rst | 227 ++---
 1 file changed, 144 insertions(+), 83 deletions(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index 65a0be5..52a5d6f 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -1,27 +1,22 @@
 The Kernel Concurrency Sanitizer (KCSAN)
 
 
-Overview
-
-
-*Kernel Concurrency Sanitizer (KCSAN)* is a dynamic data race detector for
-kernel space. KCSAN is a sampling watchpoint-based data race detector. Key
-priorities in KCSAN's design are lack of false positives, scalability, and
-simplicity. More details can be found in `Implementation Details`_.
-
-KCSAN uses compile-time instrumentation to instrument memory accesses. KCSAN is
-supported in both GCC and Clang. With GCC it requires version 7.3.0 or later.
-With Clang it requires version 7.0.0 or later.
+The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, which
+relies on compile-time instrumentation, and uses a watchpoint-based sampling
+approach to detect races. KCSAN's primary purpose is to detect `data races`_.
 
 Usage
 -
 
-To enable KCSAN configure kernel with::
+KCSAN is supported in both GCC and Clang. With GCC it requires version 7.3.0 or
+later. With Clang it requires version 7.0.0 or later.
+
+To enable KCSAN configure the kernel with::
 
 CONFIG_KCSAN = y
 
 KCSAN provides several other configuration options to customize behaviour (see
-their respective help text for more info).
+the respective help text in ``lib/Kconfig.kcsan`` for more info).
 
 Error reports
 ~
@@ -96,7 +91,8 @@ The other less common type of data race report looks like this::
 This report is generated where it was not possible to determine the other
 racing thread, but a race was inferred due to the data value of the watched
 memory location having changed. These can occur either due to missing
-instrumentation or e.g. DMA accesses.
+instrumentation or e.g. DMA accesses. These reports will only be generated if
+``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` (selected by default).
 
 Selective analysis
 ~~
@@ -110,9 +106,26 @@ the below options are available:
   behaviour when encountering a data race is deemed safe.
 
 * Disabling data race detection for entire functions can be accomplished by
-  using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for
-  ``__always_inline`` functions). To dynamically control for which functions
-  data races are reported, see the `debugfs`_ blacklist/whitelist feature.
+  using the function attribute ``__no_kcsan``::
+
+__no_kcsan
+void foo(void) {
+...
+
+  To dynamically limit for which functions to generate reports, see the
+  `DebugFS interface`_ blacklist/whitelist feature.
+
+  For ``__always_inline`` functions, replace ``__always_inline`` with
+  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
+
+static __no_kcsan_or_inline void foo(void) {
+...
+
+  Note: Older compiler versions (GCC < 9) also do not always honor the
+  ``__no_kcsan`` attribute on regular ``inline`` functions. If false positives
+  with these compilers cannot be tolerated, for small functions where
+  ``__always_inline`` would be appropriate, ``__no_kcsan_or_inline`` should be
+  preferred instead.
 
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
@@ -124,13 +137,29 @@ the below options are available:
 
 KCSAN_SANITIZE := n
 
-debugfs
-~~~
+Furthermore, it is possible to tell KCSAN to show or hide entire classes of
+data races, depending on preferences. These can be changed via the following
+Kconfig options:
+
+* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: If enabled and a conflicting write
+  is observed via a watchpoint, but the data value of the memory location was
+  observed to remain unchanged, do not report the data race.
+
+* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: Assume that plain aligned writes
+  up to word size are atomic by default. Assumes that such writes are not
+  subject to unsafe compiler optimizations resulting in data races. The option
+  causes KCSAN to not report data races due to conflicts where the only plain
+  accesses are aligned writes up to word size.
+

[tip: locking/kcsan] kcsan: Add support for scoped accesses

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 757a4cefde76697af2b2c284c8a320912b77e7e6
Gitweb:
https://git.kernel.org/tip/757a4cefde76697af2b2c284c8a320912b77e7e6
Author:Marco Elver 
AuthorDate:Wed, 25 Mar 2020 17:41:56 +01:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:11 -07:00

kcsan: Add support for scoped accesses

This adds support for scoped accesses, where the memory range is checked
for the duration of the scope. The feature is implemented by inserting
the relevant access information into a list of scoped accesses for
the current execution context, which are then checked (until removed)
on every call (through instrumentation) into the KCSAN runtime.

An alternative, more complex, implementation could set up a watchpoint for
the scoped access, and keep the watchpoint set up. This, however, would
require first exposing a handle to the watchpoint, as well as dealing
with cases such as accesses by the same thread while the watchpoint is
still set up (and several more cases). It is also doubtful if this would
provide any benefit, since the majority of delay where the watchpoint
is set up is likely due to the injected delays by KCSAN.  Therefore,
the implementation in this patch is simpler and avoids hurting KCSAN's
main use-case (normal data race detection); it also implicitly increases
scoped-access race-detection-ability due to increased probability of
setting up watchpoints by repeatedly calling __kcsan_check_access()
throughout the scope of the access.

The implementation required adding an additional conditional branch to
the fast-path. However, the microbenchmark showed a *speedup* of ~5%
on the fast-path. This appears to be due to subtly improved codegen by
GCC from moving get_ctx() and associated load of preempt_count earlier.

Suggested-by: Boqun Feng 
Suggested-by: Paul E. McKenney 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/kcsan-checks.h | 57 -
 include/linux/kcsan.h|  3 +-
 init/init_task.c |  1 +-
 kernel/kcsan/core.c  | 83 +++
 kernel/kcsan/report.c| 33 +-
 5 files changed, 158 insertions(+), 19 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 3cd8bb0..b24253d 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -3,6 +3,8 @@
 #ifndef _LINUX_KCSAN_CHECKS_H
 #define _LINUX_KCSAN_CHECKS_H
 
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
 #include <linux/types.h>
 
 /*
@@ -12,10 +14,12 @@
  *   WRITE : write access;
  *   ATOMIC: access is atomic;
  *   ASSERT: access is not a regular access, but an assertion;
+ *   SCOPED: access is a scoped access;
  */
 #define KCSAN_ACCESS_WRITE  0x1
 #define KCSAN_ACCESS_ATOMIC 0x2
 #define KCSAN_ACCESS_ASSERT 0x4
+#define KCSAN_ACCESS_SCOPED 0x8
 
 /*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
@@ -78,6 +82,52 @@ void kcsan_atomic_next(int n);
  */
 void kcsan_set_access_mask(unsigned long mask);
 
+/* Scoped access information. */
+struct kcsan_scoped_access {
+   struct list_head list;
+   const volatile void *ptr;
+   size_t size;
+   int type;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped                                                 \
+   __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
 #else /* CONFIG_KCSAN */
 
 static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -90,6 +140,13 @@ static inline void kcsan_flat_atomic_end(void)	{ }
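
A usage sketch of the new interface, based on the kernel-doc above
(object and field names are illustrative):

    struct kcsan_scoped_access sa;

    kcsan_begin_scoped_access(&obj->flags, sizeof(obj->flags), 0, &sa);
    /*
     * Until the matching end call, every entry into the KCSAN runtime
     * from this context re-checks &obj->flags for racing accesses.
     */
    do_work(obj);
    kcsan_end_scoped_access(&sa);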
 

[tip: locking/kcsan] kcsan: Add option to allow watcher interruptions

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 48b1fc190a180d971fb69217c88c7247f4f2ca19
Gitweb:
https://git.kernel.org/tip/48b1fc190a180d971fb69217c88c7247f4f2ca19
Author:Marco Elver 
AuthorDate:Fri, 21 Feb 2020 23:02:09 +01:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 25 Mar 2020 09:55:59 -07:00

kcsan: Add option to allow watcher interruptions

Add option to allow interrupts while a watchpoint is set up. This can be
enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot
parameter 'kcsan.interrupt_watcher=1'.

Note that currently not all safe per-CPU access primitives and patterns
are accounted for, which could result in false positives. For example,
asm-generic/percpu.h uses plain operations, which by default are
instrumented. On interrupts and subsequent accesses to the same
variable, KCSAN would currently report a data race with this option.

Therefore, this option should currently remain disabled by default, but
may be enabled for specific test scenarios.

To avoid new warnings, changes all uses of smp_processor_id() to use the
raw version (as already done in kcsan_found_watchpoint()). The exact SMP
processor id is for informational purposes in the report, and
correctness is not affected.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c | 34 ++
 lib/Kconfig.kcsan   | 11 +++
 2 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 589b1e7..e7387fe 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -21,6 +21,7 @@ static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
 static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
 static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
 static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
 
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
@@ -30,6 +31,7 @@ module_param_named(early_enable, kcsan_early_enable, bool, 0);
 module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
 module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
 module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
 
 bool kcsan_enabled;
 
@@ -354,7 +356,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
unsigned long access_mask;
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
unsigned long ua_flags = user_access_save();
-   unsigned long irq_flags;
+   unsigned long irq_flags = 0;
 
/*
 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
@@ -370,26 +372,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
goto out;
}
 
-   /*
-* Disable interrupts & preemptions to avoid another thread on the same
-* CPU accessing memory locations for the set up watchpoint; this is to
-* avoid reporting races to e.g. CPU-local data.
-*
-* An alternative would be adding the source CPU to the watchpoint
-* encoding, and checking that watchpoint-CPU != this-CPU. There are
-* several problems with this:
-*   1. we should avoid stealing more bits from the watchpoint encoding
-*  as it would affect accuracy, as well as increase performance
-*  overhead in the fast-path;
-*   2. if we are preempted, but there *is* a genuine data race, we
-*  would *not* report it -- since this is the common case (vs.
-*  CPU-local data accesses), it makes more sense (from a data race
-*  detection point of view) to simply disable preemptions to ensure
-*  as many tasks as possible run on other CPUs.
-*
-* Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
-*/
-   raw_local_irq_save(irq_flags);
+   if (!kcsan_interrupt_watcher)
+   /* Use raw to avoid lockdep recursion via IRQ flags tracing. */
+   raw_local_irq_save(irq_flags);
 
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
if (watchpoint == NULL) {
@@ -507,7 +492,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
-   kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+   kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(),
 		 KCSAN_REPORT_RACE_SIGNAL);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* 

[tip: locking/kcsan] kcsan: Add current->state to implicitly atomic accesses

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 44656d3dc4f0dc20010d054f27397a4a1469fabf
Gitweb:
https://git.kernel.org/tip/44656d3dc4f0dc20010d054f27397a4a1469fabf
Author:Marco Elver 
AuthorDate:Tue, 25 Feb 2020 15:32:58 +01:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 25 Mar 2020 09:56:00 -07:00

kcsan: Add current->state to implicitly atomic accesses

Add volatile current->state to list of implicitly atomic accesses. This
is in preparation to eventually enable KCSAN on kernel/sched (which
currently still has KCSAN_SANITIZE := n).

Since accesses that match the special check in atomic.h are rare, it
makes more sense to move this check to the slow-path, avoiding the
additional compare in the fast-path. With the microbenchmark, a speedup
of ~6% is measured.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/atomic.h  | 21 +++--
 kernel/kcsan/core.c| 22 +++---
 kernel/kcsan/debugfs.c | 27 ++-
 3 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index a9c1930..be9e625 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -4,24 +4,17 @@
 #define _KERNEL_KCSAN_ATOMIC_H
 
 #include <linux/jiffies.h>
+#include <linux/sched.h>
 
 /*
- * Helper that returns true if access to @ptr should be considered an atomic
- * access, even though it is not explicitly atomic.
- *
- * List all volatile globals that have been observed in races, to suppress
- * data race reports between accesses to these variables.
- *
- * For now, we assume that volatile accesses of globals are as strong as atomic
- * accesses (READ_ONCE, WRITE_ONCE cast to volatile). The situation is still not
- * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more
- * than cast to volatile. Eventually, we hope to be able to remove this
- * function.
+ * Special rules for certain memory where concurrent conflicting accesses are
+ * common, however, the current convention is to not mark them; returns true if
+ * access to @ptr should be considered atomic. Called from slow-path.
  */
-static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
+static bool kcsan_is_atomic_special(const volatile void *ptr)
 {
-   /* only jiffies for now */
-   return ptr == &jiffies;
+   /* volatile globals that have been observed in data races. */
+   return ptr == &jiffies || ptr == &current->state;
 }
 
 #endif /* _KERNEL_KCSAN_ATOMIC_H */
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 065615d..eb30ecd 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -188,12 +188,13 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
 return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
+/* Rules for generic atomic accesses. Called from fast-path. */
 static __always_inline bool
 is_atomic(const volatile void *ptr, size_t size, int type)
 {
struct kcsan_ctx *ctx;
 
-   if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+   if (type & KCSAN_ACCESS_ATOMIC)
return true;
 
/*
@@ -201,16 +202,16 @@ is_atomic(const volatile void *ptr, size_t size, int type)
 * as atomic. This allows using them also in atomic regions, such as
 * seqlocks, without implicitly changing their semantics.
 */
-   if ((type & KCSAN_ACCESS_ASSERT) != 0)
+   if (type & KCSAN_ACCESS_ASSERT)
return false;
 
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
-   (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+   (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
IS_ALIGNED((unsigned long)ptr, size))
 		return true; /* Assume aligned writes up to word size are atomic. */
 
ctx = get_ctx();
-   if (unlikely(ctx->atomic_next > 0)) {
+   if (ctx->atomic_next > 0) {
/*
 * Because we do not have separate contexts for nested
 * interrupts, in case atomic_next is set, we simply assume that
@@ -224,10 +225,8 @@ is_atomic(const volatile void *ptr, size_t size, int type)
--ctx->atomic_next; /* in task, or outer interrupt */
return true;
}
-   if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
-   return true;
 
-   return kcsan_is_atomic(ptr);
+   return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
 }
 
 static __always_inline bool
@@ -367,6 +366,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (!kcsan_is_enabled())
goto out;
 
+   /*
+* Special atomic rules: unlikely to be true, so we check them here in
+* the slow-path, and not in the fast-path in is_atomic(). Call after
+* kcsan_is_enabled(), as we may access memory that is not yet
+* initialized during early boot.
+

[tip: locking/kcsan] kcsan: Update API documentation in kcsan-checks.h

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 1443b8c9e712ef8914a2cab9ae7ce133229ed96c
Gitweb:
https://git.kernel.org/tip/1443b8c9e712ef8914a2cab9ae7ce133229ed96c
Author:Marco Elver 
AuthorDate:Thu, 05 Mar 2020 15:21:09 +01:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 25 Mar 2020 09:56:00 -07:00

kcsan: Update API documentation in kcsan-checks.h

Update the API documentation for ASSERT_EXCLUSIVE_* macros and make them
generate readable documentation for the code examples.

All @variable short summaries were missing ':'; this has been fixed
throughout the file.

Tested with "make htmldocs".

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 include/linux/kcsan-checks.h | 98 +--
 1 file changed, 61 insertions(+), 37 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 8f9f6e2..3cd8bb0 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -26,9 +26,9 @@
 /**
  * __kcsan_check_access - check generic access for races
  *
- * @ptr address of access
- * @size size of access
- * @type access type modifier
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
  */
 void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
 
@@ -64,7 +64,7 @@ void kcsan_flat_atomic_end(void);
  * Force treating the next n memory accesses for the current context as atomic
  * operations.
  *
- * @n number of following memory accesses to treat as atomic.
+ * @n: number of following memory accesses to treat as atomic.
  */
 void kcsan_atomic_next(int n);
 
@@ -74,7 +74,7 @@ void kcsan_atomic_next(int n);
  * Set the access mask for all accesses for the current context if non-zero.
  * Only value changes to bits set in the mask will be reported.
  *
- * @mask bitmask
+ * @mask: bitmask
  */
 void kcsan_set_access_mask(unsigned long mask);
 
@@ -106,16 +106,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 /**
  * __kcsan_check_read - check regular read access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
 
 /**
  * __kcsan_check_write - check regular write access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define __kcsan_check_write(ptr, size)                                         \
__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
@@ -123,16 +123,16 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
 /**
  * kcsan_check_read - check regular read access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
 
 /**
  * kcsan_check_write - check regular write access for races
  *
- * @ptr address of access
- * @size size of access
+ * @ptr: address of access
+ * @size: size of access
  */
 #define kcsan_check_write(ptr, size)                                           \
kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
@@ -158,14 +158,26 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * allowed. This assertion can be used to specify properties of concurrent code,
  * where violation cannot be detected as a normal data race.
  *
- * For example, if a per-CPU variable is only meant to be written by a single
- * CPU, but may be read from other CPUs; in this case, reads and writes must be
- * marked properly, however, if an off-CPU WRITE_ONCE() races with the owning
- * CPU's WRITE_ONCE(), would not constitute a data race but could be a harmful
- * race condition. Using this macro allows specifying this property in the code
- * and catch such bugs.
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
  *
- * @var variable to assert on
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * @var: variable to assert on
  */
 #define ASSERT_EXCLUSIVE_WRITER(var)                                           \
__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
@@ -177,16 +189,22 @@ static 

[tip: locking/kcsan] kcsan: Introduce report access_info and other_info

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 135c0872d86948046d11d7083e36c930cc43ac93
Gitweb:
https://git.kernel.org/tip/135c0872d86948046d11d7083e36c930cc43ac93
Author:Marco Elver 
AuthorDate:Wed, 18 Mar 2020 18:38:44 +01:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:10 -07:00

kcsan: Introduce report access_info and other_info

Improve readability by introducing access_info and other_info structs,
and in preparation of the following commit in this series replaces the
single instance of other_info with an array of size 1.

No functional change intended.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c   |   6 +--
 kernel/kcsan/kcsan.h  |   2 +-
 kernel/kcsan/report.c | 147 -
 3 files changed, 77 insertions(+), 78 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index ee82008..f1c3862 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -321,7 +321,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
flags = user_access_save();
 
if (consumed) {
-   kcsan_report(ptr, size, type, true, raw_smp_processor_id(),
+   kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
 KCSAN_REPORT_CONSUMED_WATCHPOINT);
} else {
/*
@@ -500,8 +500,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
 
-   kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(),
-		 KCSAN_REPORT_RACE_SIGNAL);
+   kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
 
@@ -511,7 +510,6 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 
 	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
-raw_smp_processor_id(),
 KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
}
 
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index e282f8b..6630dfe 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -135,7 +135,7 @@ enum kcsan_report_type {
  * Print a race report from thread that encountered the race.
  */
 extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-enum kcsan_value_change value_change, int cpu_id,
+enum kcsan_value_change value_change,
 enum kcsan_report_type type);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 18f9d3b..de234d1 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -19,18 +19,23 @@
  */
 #define NUM_STACK_ENTRIES 64
 
+/* Common access info. */
+struct access_info {
+   const volatile void *ptr;
+   size_t  size;
+   int access_type;
+   int task_pid;
+   int cpu_id;
+};
+
 /*
  * Other thread info: communicated from other racing thread to thread that set
  * up the watchpoint, which then prints the complete report atomically. Only
 * need one struct, as all threads should be serialized regardless to print
  * the reports, with reporting being in the slow-path.
  */
-static struct {
-   const volatile void *ptr;
-   size_t  size;
-   int access_type;
-   int task_pid;
-   int cpu_id;
+struct other_info {
+   struct access_info  ai;
unsigned long   stack_entries[NUM_STACK_ENTRIES];
int num_stack_entries;
 
@@ -52,7 +57,9 @@ static struct {
 * that populated @other_info until it has been consumed.
 */
struct task_struct  *task;
-} other_info;
+};
+
+static struct other_info other_infos[1];
 
 /*
  * Information about reported races; used to rate limit reporting.
@@ -238,7 +245,7 @@ static const char *get_thread_desc(int task_id)
 }
 
 /* Helper to skip KCSAN-related functions in stack-trace. */
-static int get_stack_skipnr(unsigned long stack_entries[], int num_entries)
+static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
 {
char buf[64];
int skip = 0;
@@ -279,9 +286,10 @@ static void print_verbose_info(struct task_struct *task)
 /*
  * Returns true if a report was generated, false otherwise.
  */
-static bool 

[tip: locking/kcsan] kcsan: Make reporting aware of KCSAN tests

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: cdb9b07d8c78be63d72aba9a2686ff161ddd2099
Gitweb:
https://git.kernel.org/tip/cdb9b07d8c78be63d72aba9a2686ff161ddd2099
Author:Marco Elver 
AuthorDate:Fri, 10 Apr 2020 18:44:18 +02:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:16 -07:00

kcsan: Make reporting aware of KCSAN tests

Reporting hides KCSAN runtime functions in the stack trace, with
filtering done based on function names. Previously this included all
functions (or modules) matching "kcsan_". Make the filter aware of
KCSAN tests, whose function names contain "kcsan_test" and which are no
longer skipped in the report.

This is in preparation for adding a KCSAN test module.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/report.c | 30 +++---
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index cf41d63..ac5f834 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -262,16 +262,32 @@ static const char *get_thread_desc(int task_id)
 static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
 {
char buf[64];
-   int len;
-   int skip = 0;
+   char *cur;
+   int len, skip;
 
-   for (; skip < num_entries; ++skip) {
+   for (skip = 0; skip < num_entries; ++skip) {
 	len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);
-   if (!strnstr(buf, "csan_", len) &&
-   !strnstr(buf, "tsan_", len) &&
-   !strnstr(buf, "_once_size", len))
-   break;
+
+   /* Never show tsan_* or {read,write}_once_size. */
+   if (strnstr(buf, "tsan_", len) ||
+   strnstr(buf, "_once_size", len))
+   continue;
+
+   cur = strnstr(buf, "kcsan_", len);
+   if (cur) {
+   cur += sizeof("kcsan_") - 1;
+   if (strncmp(cur, "test", sizeof("test") - 1))
+   continue; /* KCSAN runtime function. */
+   /* KCSAN related test. */
+   }
+
+   /*
+* No match for runtime functions -- @skip entries to skip to
+* get to first frame of interest.
+*/
+   break;
}
+
return skip;
 }
 


[tip: locking/kcsan] kcsan: Add option for verbose reporting

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 2402d0eae589a31ee7b1774cb220d84d0f5605b4
Gitweb:
https://git.kernel.org/tip/2402d0eae589a31ee7b1774cb220d84d0f5605b4
Author:Marco Elver 
AuthorDate:Sat, 22 Feb 2020 00:10:27 +01:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 25 Mar 2020 09:56:00 -07:00

kcsan: Add option for verbose reporting

Adds CONFIG_KCSAN_VERBOSE to optionally enable more verbose reports.
Currently, information about the reporting task's held locks and IRQ
trace events is shown, if they are enabled.

Signed-off-by: Marco Elver 
Suggested-by: Qian Cai 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c   |   4 +-
 kernel/kcsan/kcsan.h  |   3 +-
 kernel/kcsan/report.c | 103 -
 lib/Kconfig.kcsan |  13 +-
 4 files changed, 120 insertions(+), 3 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index e7387fe..065615d 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -18,8 +18,8 @@
 #include "kcsan.h"
 
 static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
-static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
-static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
+unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
+unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
 static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
 static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
 
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
index 892de51..e282f8b 100644
--- a/kernel/kcsan/kcsan.h
+++ b/kernel/kcsan/kcsan.h
@@ -13,6 +13,9 @@
 /* The number of adjacent watchpoints to check. */
 #define KCSAN_CHECK_ADJACENT 1
 
+extern unsigned int kcsan_udelay_task;
+extern unsigned int kcsan_udelay_interrupt;
+
 /*
  * Globally enable and disable KCSAN.
  */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 11c791b..18f9d3b 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/debug_locks.h>
+#include <linux/delay.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/preempt.h>
@@ -31,7 +33,26 @@ static struct {
int cpu_id;
unsigned long   stack_entries[NUM_STACK_ENTRIES];
int num_stack_entries;
-} other_info = { .ptr = NULL };
+
+   /*
+* Optionally pass @current. Typically we do not need to pass @current
+* via @other_info since just @task_pid is sufficient. Passing @current
+* has additional overhead.
+*
+* To safely pass @current, we must either use get_task_struct/
+* put_task_struct, or stall the thread that populated @other_info.
+*
+* We cannot rely on get_task_struct/put_task_struct in case
+* release_report() races with a task being released, and would have to
+* free it in release_report(). This may result in deadlock if we want
+* to use KCSAN on the allocators.
+*
+* Since we also want to reliably print held locks for
+* CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
+* that populated @other_info until it has been consumed.
+*/
+   struct task_struct  *task;
+} other_info;
 
 /*
  * Information about reported races; used to rate limit reporting.
@@ -245,6 +266,16 @@ static int sym_strcmp(void *addr1, void *addr2)
return strncmp(buf1, buf2, sizeof(buf1));
 }
 
+static void print_verbose_info(struct task_struct *task)
+{
+   if (!task)
+   return;
+
+   pr_err("\n");
+   debug_show_held_locks(task);
+   print_irqtrace_events(task);
+}
+
 /*
  * Returns true if a report was generated, false otherwise.
  */
@@ -319,6 +350,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
  other_info.num_stack_entries - other_skipnr,
  0);
 
+   if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+   print_verbose_info(other_info.task);
+
pr_err("\n");
pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
   get_access_type(access_type), ptr, size,
@@ -340,6 +374,9 @@ static bool print_report(const volatile void *ptr, size_t size, int access_type,
stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
  0);
 
+   if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+   print_verbose_info(current);
+
/* Print report footer. */
pr_err("\n");
pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
@@ -358,6 +395,67 @@ static void release_report(unsigned long *flags, enum kcsan_report_type type)
 }
 
 /*
+ * Sets @other_info.task and awaits consumption of @other_info.
+ *
+ * Precondition: report_lock is held.
+ * 

[tip: locking/kcsan] kcsan: Avoid blocking producers in prepare_report()

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 6119418f94ca5314392d258d27eb0cb58bb1774e
Gitweb:
https://git.kernel.org/tip/6119418f94ca5314392d258d27eb0cb58bb1774e
Author:Marco Elver 
AuthorDate:Wed, 18 Mar 2020 18:38:45 +01:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:11 -07:00

kcsan: Avoid blocking producers in prepare_report()

To avoid deadlock in case watchers can be interrupted, we need to ensure
that producers of the struct other_info can never be blocked by an
unrelated consumer. (Likely to occur with KCSAN_INTERRUPT_WATCHER.)

There are several cases that can lead to this scenario, for example:

1. A watchpoint A was set up by task T1, but interrupted by
   interrupt I1. Some other thread (task or interrupt) finds
   watchpoint A, consumes it, and sets other_info. Then I1 also
   finds some unrelated watchpoint B, consumes it, but is blocked
   because other_info is in use. T1 cannot consume other_info
   because I1 never returns -> deadlock.

2. A watchpoint A was set up by task T1, but interrupted by
   interrupt I1, which also sets up a watchpoint B. Some other
   thread finds watchpoint A, and consumes it and sets up
   other_info with its information. Similarly some other thread
   finds watchpoint B and consumes it, but is then blocked because
   other_info is in use. When I1 continues it sees its watchpoint
   was consumed, and that it must wait for other_info, which
   currently contains information to be consumed by T1. However, T1
   cannot unblock other_info because I1 never returns -> deadlock.

To avoid this, we need to ensure that producers of struct other_info
always have a usable other_info entry. This is obviously not the case
with only a single instance of struct other_info, as concurrent
producers must wait for the entry to be released by some consumer (which
may be locked up as illustrated above).

While it would be nice if producers could simply call kmalloc() and
append their instance of struct other_info to a list, we are very
limited in this code path: since KCSAN can instrument the allocators
themselves, calling kmalloc() could lead to deadlock or corrupted
allocator state.

Since producers of the struct other_info will always succeed at
try_consume_watchpoint(), preceding the call into kcsan_report(), we
know that the particular watchpoint slot cannot simply be reused or
consumed by another potential other_info producer. If we move removal of
a watchpoint after reporting (by the consumer of struct other_info), we
can see a consumed watchpoint as a held lock on elements of other_info,
if we create a one-to-one mapping of a watchpoint to an other_info
element.

Therefore, the simplest solution is to create an array of struct
other_info that is as large as the watchpoints array in core.c, and pass
the watchpoint index to kcsan_report() for producers and consumers, and
change watchpoints to be removed after reporting is done.

With a default config on a 64-bit system, the array other_infos consumes
~37KiB. For most systems today this is not a problem. On smaller memory
constrained systems, the config value CONFIG_KCSAN_NUM_WATCHPOINTS can
be reduced appropriately.

Overall, this change is a simplification of the prepare_report() code,
and makes some of the checks (such as checking if at least one access is
a write) redundant.

Tested:
$ tools/testing/selftests/rcutorture/bin/kvm.sh \
--cpus 12 --duration 10 --kconfig "CONFIG_DEBUG_INFO=y \
CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n \
CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n \
CONFIG_KCSAN_REPORT_ONCE_IN_MS=10 CONFIG_KCSAN_VERBOSE=y \
CONFIG_KCSAN_INTERRUPT_WATCHER=y CONFIG_PROVE_LOCKING=y" \
--configs TREE03
=> No longer hangs and runs to completion as expected.

Reported-by: Paul E. McKenney 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 kernel/kcsan/core.c   |  31 --
 kernel/kcsan/kcsan.h  |   3 +-
 kernel/kcsan/report.c | 212 +++--
 3 files changed, 124 insertions(+), 122 deletions(-)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index f1c3862..4d8ea0f 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -69,7 +69,6 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
  *   slot=9:  [10, 11,  9]
  *   slot=63: [64, 65, 63]
  */
-#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
 #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
 
 /*
@@ -171,12 +170,16 @@ try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
 	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
 }
 
-/*
- * Return true if watchpoint was not touched, false if consumed.
- */
-static inline bool 
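
A sketch of the resulting one-to-one mapping (the array size mirrors
the watchpoints array; treat the exact expression as an assumption,
since the hunk above is truncated):

    /* One other_info slot per watchpoint slot: */
    static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS +
                                         NUM_SLOTS - 1];

    /*
     * Producers and consumers pass the index of the consumed
     * watchpoint to kcsan_report(), so a consumed watchpoint acts as
     * a held lock protecting its other_infos[] entry.
     */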

[tip: locking/kcsan] objtool, kcsan: Add kcsan_disable_current() and kcsan_enable_current_nowarn()

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 50a19ad4b1ec531eb550183cb5d4ab9f25a56bf8
Gitweb:
https://git.kernel.org/tip/50a19ad4b1ec531eb550183cb5d4ab9f25a56bf8
Author:Marco Elver 
AuthorDate:Fri, 24 Apr 2020 17:47:30 +02:00
Committer: Paul E. McKenney 
CommitterDate: Wed, 06 May 2020 13:47:06 -07:00

objtool, kcsan: Add kcsan_disable_current() and kcsan_enable_current_nowarn()

Both are safe to be called from uaccess contexts.

Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 tools/objtool/check.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b6a573d..9122c20 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -472,6 +472,8 @@ static const char *uaccess_safe_builtin[] = {
"kcsan_found_watchpoint",
"kcsan_setup_watchpoint",
"kcsan_check_scoped_accesses",
+   "kcsan_disable_current",
+   "kcsan_enable_current_nowarn",
/* KCSAN/TSAN */
"__tsan_func_entry",
"__tsan_func_exit",


[tip: locking/kcsan] kcsan: Introduce scoped ASSERT_EXCLUSIVE macros

2020-05-08 Thread tip-bot2 for Marco Elver
The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: d8949ef1d9f1062848cd068cf369a57ce33dae6f
Gitweb:
https://git.kernel.org/tip/d8949ef1d9f1062848cd068cf369a57ce33dae6f
Author:Marco Elver 
AuthorDate:Wed, 25 Mar 2020 17:41:58 +01:00
Committer: Paul E. McKenney 
CommitterDate: Mon, 13 Apr 2020 17:18:13 -07:00

kcsan: Introduce scoped ASSERT_EXCLUSIVE macros

Introduce ASSERT_EXCLUSIVE_*_SCOPED(), which provide an intuitive
interface to the scoped-access feature, without having to explicitly
mark the start and end of the desired scope. Basing the duration of the
checks on the scope avoids accidental misuse and the resulting false
positives, which may be hard to debug. See the added comments for usage.

The macros are implemented using __attribute__((__cleanup__(func))),
which is supported by all compilers that currently support KCSAN.
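
As background, a standalone sketch of the __cleanup__ mechanism in
generic C (names are illustrative, not kernel code):

    static void scope_exit(int *p)
    {
            /* invoked automatically when 'guard' goes out of scope */
    }

    void demo(void)
    {
            {
                    int guard __attribute__((__cleanup__(scope_exit))) = 0;
                    /* ... scoped checks would be active here ... */
            }       /* the compiler calls scope_exit(&guard) here */
    }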

Suggested-by: Boqun Feng 
Suggested-by: Paul E. McKenney 
Signed-off-by: Marco Elver 
Signed-off-by: Paul E. McKenney 
---
 Documentation/dev-tools/kcsan.rst |  3 +-
 include/linux/kcsan-checks.h  | 73 +-
 kernel/kcsan/debugfs.c| 16 ++-
 3 files changed, 89 insertions(+), 3 deletions(-)

diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index 52a5d6f..f4b5766 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -238,7 +238,8 @@ are defined at the C-language level. The following macros can be used to check
 properties of concurrent code where bugs would not manifest as data races.
 
 .. kernel-doc:: include/linux/kcsan-checks.h
-:functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_ACCESS
+:functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_WRITER_SCOPED
+ASSERT_EXCLUSIVE_ACCESS ASSERT_EXCLUSIVE_ACCESS_SCOPED
 ASSERT_EXCLUSIVE_BITS
 
 Implementation Details
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index b24253d..101df7f 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -234,11 +234,63 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * ... = READ_ONCE(shared_foo);
  * }
  *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking if a clear scope where no concurrent writes are expected exists.
+ *
  * @var: variable to assert on
  */
 #define ASSERT_EXCLUSIVE_WRITER(var)   
\
__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
 
+/*
+ * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)   
\
+   struct kcsan_scoped_access __kcsan_scoped_name(id, _)  \
+   __kcsan_cleanup_scoped;\
+   struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)  \
+   __maybe_unused = kcsan_begin_scoped_access(\
+   &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+   &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)
\
+   __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
 /**
  * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
  *
@@ -258,6 +310,9 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
  * release_for_reuse(obj);
  * }
  *
+ * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(),