Add an RV monitor to detect realtime tasks getting blocked. For the full
description, see Documentation/trace/rv/monitor_rtapp_block.rst.

Signed-off-by: Nam Cao <[email protected]>
---
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Boqun Feng <[email protected]>
Cc: Waiman Long <[email protected]>
---
 .../trace/rv/monitor_rtapp_block.rst          |  34 +++
 include/trace/events/lock.h                   |  12 +
 kernel/locking/rtmutex.c                      |   4 +
 kernel/trace/rv/Kconfig                       |  12 +-
 kernel/trace/rv/Makefile                      |   2 +
 kernel/trace/rv/monitors/rtapp_block/ba.c     | 146 +++++++++++
 kernel/trace/rv/monitors/rtapp_block/ba.h     | 166 +++++++++++++
 kernel/trace/rv/monitors/rtapp_block/ltl      |   9 +
 .../rv/monitors/rtapp_block/rtapp_block.c     | 232 ++++++++++++++++++
 kernel/trace/rv/rv_trace.h                    |  44 ++++
 lib/Kconfig.debug                             |   3 +
 11 files changed, 663 insertions(+), 1 deletion(-)
 create mode 100644 Documentation/trace/rv/monitor_rtapp_block.rst
 create mode 100644 kernel/trace/rv/monitors/rtapp_block/ba.c
 create mode 100644 kernel/trace/rv/monitors/rtapp_block/ba.h
 create mode 100644 kernel/trace/rv/monitors/rtapp_block/ltl
 create mode 100644 kernel/trace/rv/monitors/rtapp_block/rtapp_block.c

diff --git a/Documentation/trace/rv/monitor_rtapp_block.rst 
b/Documentation/trace/rv/monitor_rtapp_block.rst
new file mode 100644
index 000000000000..9cabbe66fa4a
--- /dev/null
+++ b/Documentation/trace/rv/monitor_rtapp_block.rst
@@ -0,0 +1,34 @@
+Monitor rtapp_block
+=======================
+
+- Name: rtapp_block - real time applications are undesirably blocked
+- Type: per-task linear temporal logic monitor
+- Author: Nam Cao <[email protected]>
+
+Introduction
+------------
+
+Real time threads could be blocked and fail to finish their execution in time. For instance, they
+need to access shared resources which are already acquired by other threads. 
Or they could be
+waiting for non-realtime threads to signal them to proceed: as the 
non-realtime threads are not
+prioritized by the scheduler, the execution of realtime threads could be 
delayed indefinitely.
+These scenarios are often unintentional, and cause unexpected latency to the 
realtime application.
+
+The rtapp_block monitor reports this type of scenario, by monitoring for:
+
+  * Realtime threads going to sleep without explicitly asking for it (namely, 
with nanosleep
+    syscall).
+  * Realtime threads being woken up by non-realtime threads.
+
+How to fix the monitor's warnings?
+----------------------------------
+
+There is no single answer, the solution needs to be evaluated depending on the 
specific cases.
+
+If the realtime thread is blocked trying to take a `pthread_mutex_t` which is 
already taken by a
+non-realtime thread, the solution could be enabling priority inheritance for 
the mutex, so that the
+blocking non-realtime thread would be priority-boosted to run at realtime 
priority.
+
+If a realtime thread needs to wait for a non-realtime thread to signal it to proceed, perhaps the
+design needs to be reconsidered to remove this dependency. Often, the work executed by the realtime
+thread need not be realtime at all.
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index 8e89baa3775f..d4b32194d47f 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -138,6 +138,18 @@ TRACE_EVENT(contention_end,
        TP_printk("%p (ret=%d)", __entry->lock_addr, __entry->ret)
 );
 
+#ifdef CONFIG_TRACE_RT_MUTEX_WAKE_WAITER
+DECLARE_TRACE(rt_mutex_wake_waiter_begin,
+               TP_PROTO(struct task_struct *task),
+               TP_ARGS(task));
+DECLARE_TRACE(rt_mutex_wake_waiter_end,
+               TP_PROTO(struct task_struct *task),
+               TP_ARGS(task));
+#else
+#define trace_rt_mutex_wake_waiter_begin(...)
+#define trace_rt_mutex_wake_waiter_end(...)
+#endif /* CONFIG_TRACE_RT_MUTEX_WAKE_WAITER */
+
 #endif /* _TRACE_LOCK_H */
 
 /* This part must be outside protection */
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4a8df1800cbb..fc9cf4a2cf75 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -561,6 +561,8 @@ static __always_inline void rt_mutex_wake_q_add(struct 
rt_wake_q_head *wqh,
 
 static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
 {
+       trace_rt_mutex_wake_waiter_begin(current);
+
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
                wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
                put_task_struct(wqh->rtlock_task);
@@ -572,6 +574,8 @@ static __always_inline void rt_mutex_wake_up_q(struct 
rt_wake_q_head *wqh)
 
        /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
        preempt_enable();
+
+       trace_rt_mutex_wake_waiter_end(current);
 }
 
 /*
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
index 8226352a0062..d65bf9bda2f2 100644
--- a/kernel/trace/rv/Kconfig
+++ b/kernel/trace/rv/Kconfig
@@ -13,7 +13,7 @@ config DA_MON_EVENTS_ID
 
 menuconfig RV
        bool "Runtime Verification"
-       depends on TRACING
+       select TRACING
        help
          Enable the kernel runtime verification infrastructure. RV is a
          lightweight (yet rigorous) method that complements classical
@@ -29,6 +29,16 @@ source "kernel/trace/rv/monitors/wip/Kconfig"
 source "kernel/trace/rv/monitors/wwnr/Kconfig"
 # Add new monitors here
 
+config RV_MON_RTAPP_BLOCK
+       bool "rtapp_block monitor"
+       depends on RV
+       select DA_MON_EVENTS
+       select TRACE_IRQFLAGS
+       select TRACE_RT_MUTEX_WAKE_WAITER
+       help
+         Enable rtapp_block which monitors that realtime tasks are not blocked.
+         For details, see Documentation/trace/rv/monitor_rtapp_block.rst.
+
 config RV_REACTORS
        bool "Runtime verification reactors"
        default y
diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile
index 188b64668e1f..6570a3116127 100644
--- a/kernel/trace/rv/Makefile
+++ b/kernel/trace/rv/Makefile
@@ -5,6 +5,8 @@ ccflags-y += -I $(src)          # needed for trace events
 obj-$(CONFIG_RV) += rv.o
 obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
 obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
+obj-$(CONFIG_RV_MON_RTAPP_BLOCK) += monitors/rtapp_block/ba.o \
+                                   monitors/rtapp_block/rtapp_block.o
 # Add new monitors here
 obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
 obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
diff --git a/kernel/trace/rv/monitors/rtapp_block/ba.c 
b/kernel/trace/rv/monitors/rtapp_block/ba.c
new file mode 100644
index 000000000000..5e99f79d5e74
--- /dev/null
+++ b/kernel/trace/rv/monitors/rtapp_block/ba.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is generated, do not edit.
+ */
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <trace/events/task.h>
+#include <trace/events/sched.h>
+
+#include "ba.h"
+
+static_assert(NUM_ATOM <= RV_MAX_LTL_ATOM);
+
+enum buchi_state {
+       INIT,
+       S3,
+       DEAD,
+};
+
+int rv_rtapp_block_task_slot = RV_PER_TASK_MONITOR_INIT;
+
+static void init_monitor(struct task_struct *task)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(task);
+
+       for (int i = 0; i < NUM_ATOM; ++i)
+               mon->atoms[i] = LTL_UNDETERMINED;
+       mon->state = INIT;
+}
+
+static void handle_task_newtask(void *data, struct task_struct *task, unsigned 
long flags)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(task);
+
+       init_monitor(task);
+
+       rv_rtapp_block_atoms_init(task, mon);
+       rv_rtapp_block_atoms_fetch(task, mon);
+}
+
+int rv_rtapp_block_init(size_t data_size)
+{
+       struct task_struct *g, *p;
+       int ret, cpu;
+
+       if (WARN_ON(data_size > RV_MAX_DATA_SIZE))
+               return -EINVAL;
+
+       ret = rv_get_task_monitor_slot();
+       if (ret < 0)
+               return ret;
+
+       rv_rtapp_block_task_slot = ret;
+
+       rv_attach_trace_probe("rtapp_block", task_newtask, handle_task_newtask);
+
+       read_lock(&tasklist_lock);
+
+       for_each_process_thread(g, p)
+               init_monitor(p);
+
+       for_each_present_cpu(cpu)
+               init_monitor(idle_task(cpu));
+
+       read_unlock(&tasklist_lock);
+
+       return 0;
+}
+
+void rv_rtapp_block_destroy(void)
+{
+       rv_put_task_monitor_slot(rv_rtapp_block_task_slot);
+       rv_rtapp_block_task_slot = RV_PER_TASK_MONITOR_INIT;
+
+       rv_detach_trace_probe("rtapp_block", task_newtask, handle_task_newtask);
+}
+
+static void illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+{
+       mon->state = INIT;
+       rv_rtapp_block_error(task, mon);
+}
+
+static void rv_rtapp_block_attempt_start(struct task_struct *task, struct 
ltl_monitor *mon)
+{
+       int i;
+
+       mon = rv_rtapp_block_get_monitor(task);
+
+       rv_rtapp_block_atoms_fetch(task, mon);
+
+       for (i = 0; i < NUM_ATOM; ++i) {
+               if (mon->atoms[i] == LTL_UNDETERMINED)
+                       return;
+       }
+
+       if (((!mon->atoms[WAKEUP_RT_TASK] || (mon->atoms[RT] || 
(mon->atoms[RT_MUTEX_WAKING_WAITER]
+          || (mon->atoms[STOPPING_WOKEN_TASK] || 
(mon->atoms[WOKEN_TASK_IS_MIGRATION] ||
+          mon->atoms[WOKEN_TASK_IS_RCU])))))) && (((!mon->atoms[USER_TASK] || 
!mon->atoms[RT]) ||
+          (!mon->atoms[SLEEP] || (mon->atoms[DO_NANOSLEEP] || 
mon->atoms[FUTEX_LOCK_WITH_PI])))))
+               mon->state = S3;
+       else
+               illegal_state(task, mon);
+}
+
+static void rv_rtapp_block_step(struct task_struct *task, struct ltl_monitor 
*mon)
+{
+       switch (mon->state) {
+       case S3:
+               if (((!mon->atoms[WAKEUP_RT_TASK] || (mon->atoms[RT] ||
+                  (mon->atoms[RT_MUTEX_WAKING_WAITER] || 
(mon->atoms[STOPPING_WOKEN_TASK] ||
+                  (mon->atoms[WOKEN_TASK_IS_MIGRATION] || 
mon->atoms[WOKEN_TASK_IS_RCU])))))) &&
+                  (((!mon->atoms[USER_TASK] || !mon->atoms[RT]) || 
(!mon->atoms[SLEEP] ||
+                  (mon->atoms[DO_NANOSLEEP] || 
mon->atoms[FUTEX_LOCK_WITH_PI])))))
+                       mon->state = S3;
+               else
+                       illegal_state(task, mon);
+               break;
+       case DEAD:
+       case INIT:
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+}
+
+void rv_rtapp_block_atom_update(struct task_struct *task, unsigned int atom, 
bool value)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(task);
+
+       rv_rtapp_block_atom_set(mon, atom, value);
+
+       if (mon->state == DEAD)
+               return;
+
+       if (mon->state == INIT)
+               rv_rtapp_block_attempt_start(task, mon);
+       if (mon->state == INIT)
+               return;
+
+       mon->atoms[atom] = value;
+
+       rv_rtapp_block_atoms_fetch(task, mon);
+
+       rv_rtapp_block_step(task, mon);
+}
diff --git a/kernel/trace/rv/monitors/rtapp_block/ba.h 
b/kernel/trace/rv/monitors/rtapp_block/ba.h
new file mode 100644
index 000000000000..c1ba88f6779a
--- /dev/null
+++ b/kernel/trace/rv/monitors/rtapp_block/ba.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is generated, do not edit.
+ *
+ * This file includes necessary functions to glue the Buchi automaton and the 
kernel together.
+ * Some of these functions must be manually implemented (look for "Must be 
implemented", or just
+ * let the compiler tells you).
+ *
+ * Essentially, you need to manually define the meaning of the atomic 
propositions in the LTL
+ * property. The primary function for that is rv_rtapp_block_atom_update(), 
which can be called
+ * in tracepoints' handlers for example. In some specific cases where
+ * rv_rtapp_block_atom_update() is not convenient, 
rv_rtapp_block_atoms_fetch() can be used.
+ *
+ * rv_rtapp_block_init()/rv_rtapp_block_destroy() must be called while 
enabling/disabling
+ * the monitor.
+ *
+ * If the fields in struct ltl_monitor is not enough, extra custom data can be 
used. See
+ * rv_rtapp_block_get_data().
+ */
+
+#include <linux/sched.h>
+
+enum rtapp_block_atom {
+       DO_NANOSLEEP,
+       FUTEX_LOCK_WITH_PI,
+       RT,
+       RT_MUTEX_WAKING_WAITER,
+       SLEEP,
+       STOPPING_WOKEN_TASK,
+       USER_TASK,
+       WAKEUP_RT_TASK,
+       WOKEN_TASK_IS_MIGRATION,
+       WOKEN_TASK_IS_RCU,
+       NUM_ATOM
+};
+
+/**
+ * rv_rtapp_block_init
+ * @data_size: required custom data size, can be zero
+ *
+ * Must be called while enabling the monitor
+ */
+int rv_rtapp_block_init(size_t data_size);
+
+/**
+ * rv_rtapp_block_destroy
+ *
+ * must be called while disabling the monitor
+ */
+void rv_rtapp_block_destroy(void);
+
+/**
+ * rv_rtapp_block_error - report violation of the LTL property
+ * @task:      the task violating the LTL property
+ * @mon:       the LTL monitor
+ *
+ * Must be implemented. This function should invoke the RV reactor and the 
monitor's tracepoints.
+ */
+void rv_rtapp_block_error(struct task_struct *task, struct ltl_monitor *mon);
+
+extern int rv_rtapp_block_task_slot;
+
+/**
+ * rv_rtapp_block_get_monitor - get the struct ltl_monitor of a task
+ */
+static inline struct ltl_monitor *rv_rtapp_block_get_monitor(struct 
task_struct *task)
+{
+       return &task->rv[rv_rtapp_block_task_slot].ltl_mon;
+}
+
+/**
+ * rv_rtapp_block_atoms_init - initialize the atomic propositions
+ * @task:      the task
+ * @mon:       the LTL monitor
+ *
+ * Must be implemented. This function is called during task creation, and 
should initialize all
+ * atomic propositions. rv_rtapp_block_atom_set() should be used to implement 
this function.
+ *
+ * This function does not have to initialize atomic propositions that are 
updated by
+ * rv_rtapp_block_atoms_fetch(), because the two functions are called together.
+ */
+void rv_rtapp_block_atoms_init(struct task_struct *task, struct ltl_monitor 
*mon);
+
+/**
+ * rv_rtapp_block_atoms_fetch - fetch the atomic propositions
+ * @task:      the task
+ * @mon:       the LTL monitor
+ *
+ * Must be implemented. This function is called anytime the Buchi automaton is 
triggered. Its
+ * intended purpose is to update the atomic propositions which are expensive 
to trace and can be
+ * easily read from @task. rv_rtapp_block_atom_set() should be used to 
implement this function.
+ *
+ * Using this function may cause incorrect verification result if it is 
important for the LTL that
+ * the atomic propositions must be updated at the correct time. Therefore, if 
it is possible,
+ * updating atomic propositions should be done with 
rv_rtapp_block_atom_update() instead.
+ *
+ * An example where this function is useful is with the LTL property:
+ *    always (RT imply not PAGEFAULT)
+ * (a realtime task does not raise page faults)
+ *
+ * In this example, adding tracepoints to track RT is complicated, because it 
is changed in
+ * differrent places (mutex's priority boosting, sched_setscheduler). 
Furthermore, for this LTL
+ * property, we don't care exactly when RT changes, as long as we have its 
correct value when
+ * PAGEFAULT==true. Therefore, it is better to update RT in 
rv_rtapp_block_atoms_fetch(), as it
+ * can easily be retrieved from task_struct.
+ *
+ * This function can be empty.
+ */
+void rv_rtapp_block_atoms_fetch(struct task_struct *task, struct ltl_monitor 
*mon);
+
+/**
+ * rv_rtapp_block_atom_update - update an atomic proposition
+ * @task:      the task
+ * @atom:      the atomic proposition, one of enum rtapp_block_atom
+ * @value:     the new value for @atom
+ *
+ * Update an atomic proposition and trigger the Buchi atomaton to check for 
violation of the LTL
+ * property. This function can be called in tracepoints' handler, for example.
+ */
+void rv_rtapp_block_atom_update(struct task_struct *task, unsigned int atom, 
bool value);
+
+/**
+ * rv_rtapp_block_atom_get - get an atomic proposition
+ * @mon:       the monitor
+ * @atom:      the atomic proposition, one of enum rtapp_block_atom
+ *
+ * Returns the value of an atomic proposition.
+ */
+static inline
+enum ltl_truth_value rv_rtapp_block_atom_get(struct ltl_monitor *mon, unsigned 
int atom)
+{
+       return mon->atoms[atom];
+}
+
+/**
+ * rv_rtapp_block_atom_set - set an atomic proposition
+ * @mon:       the monitor
+ * @atom:      the atomic proposition, one of enum rtapp_block_atom
+ * @value:     the new value for @atom
+ *
+ * Update an atomic proposition without triggering the Buchi automaton. This 
can be useful to
+ * implement rv_rtapp_block_atoms_fetch() and rv_rtapp_block_atoms_init().
+ *
+ * Another use case for this function is when multiple atomic propositions 
change at the same time,
+ * because calling rv_rtapp_block_atom_update() (and thus triggering the Buchi 
automaton)
+ * multiple times may be incorrect. In that case, rv_rtapp_block_atom_set() 
can be used to avoid
+ * triggering the Buchi automaton, and rv_rtapp_block_atom_update() is only 
used for the last
+ * atomic proposition.
+ */
+static inline
+void rv_rtapp_block_atom_set(struct ltl_monitor *mon, unsigned int atom, bool 
value)
+{
+       mon->atoms[atom] = value;
+}
+
+/**
+ * rv_rtapp_block_get_data - get the custom data of this monitor.
+ * @mon: the monitor
+ *
+ * If this function is used, rv_rtapp_block_init() must have been called with 
a positive
+ * data_size.
+ */
+static inline void *rv_rtapp_block_get_data(struct ltl_monitor *mon)
+{
+       return &mon->data;
+}
diff --git a/kernel/trace/rv/monitors/rtapp_block/ltl 
b/kernel/trace/rv/monitors/rtapp_block/ltl
new file mode 100644
index 000000000000..781f0144a222
--- /dev/null
+++ b/kernel/trace/rv/monitors/rtapp_block/ltl
@@ -0,0 +1,9 @@
+RULE = always (WAKEUP_RT_TASK imply (RT or WAKEUP_WHITELIST))
+   and always ((USER_TASK and RT) imply (SLEEP imply INTENTIONAL_SLEEP))
+
+INTENTIONAL_SLEEP = DO_NANOSLEEP or FUTEX_LOCK_WITH_PI
+
+WAKEUP_WHITELIST = RT_MUTEX_WAKING_WAITER
+                or STOPPING_WOKEN_TASK
+                or WOKEN_TASK_IS_MIGRATION
+                or WOKEN_TASK_IS_RCU
diff --git a/kernel/trace/rv/monitors/rtapp_block/rtapp_block.c 
b/kernel/trace/rv/monitors/rtapp_block/rtapp_block.c
new file mode 100644
index 000000000000..3f5b1efb7af0
--- /dev/null
+++ b/kernel/trace/rv/monitors/rtapp_block/rtapp_block.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched/rt.h>
+#include <linux/preempt.h>
+#include <linux/rv.h>
+
+#include <uapi/linux/futex.h>
+#include <trace/events/syscalls.h>
+#include <trace/events/sched.h>
+#include <trace/events/task.h>
+#include <trace/events/lock.h>
+#include <trace/events/preemptirq.h>
+
+#include <rv_trace.h>
+#include <rv/instrumentation.h>
+
+
+#include "ba.h"
+
+struct rtapp_block_data {
+       struct task_struct *woken_task;
+       struct task_struct *stopping_task;
+};
+
+static void handle_sys_enter(void *data, struct pt_regs *regs, long id)
+{
+       unsigned long args[6];
+       int op, cmd;
+
+       switch (id) {
+       case __NR_nanosleep:
+       case __NR_clock_nanosleep:
+#ifdef __NR_clock_nanosleep_time64
+       case __NR_clock_nanosleep_time64:
+#endif
+               rv_rtapp_block_atom_update(current, DO_NANOSLEEP, true);
+               break;
+
+       case __NR_futex:
+#ifdef __NR_futex_time64
+       case __NR_futex_time64:
+#endif
+               syscall_get_arguments(current, regs, args);
+               op = args[1];
+               cmd = op & FUTEX_CMD_MASK;
+
+               if (cmd == FUTEX_LOCK_PI || cmd == FUTEX_LOCK_PI2)
+                       rv_rtapp_block_atom_update(current, FUTEX_LOCK_WITH_PI, 
true);
+               break;
+       }
+}
+
+static void handle_sys_exit(void *data, struct pt_regs *regs, long ret)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(current);
+
+       rv_rtapp_block_atom_set(mon, FUTEX_LOCK_WITH_PI, false);
+       rv_rtapp_block_atom_update(current, DO_NANOSLEEP, false);
+}
+
+static void handle_sched_switch(void *data, bool preempt, struct task_struct 
*prev,
+                               struct task_struct *next, unsigned int 
prev_state)
+{
+       if (prev_state & TASK_INTERRUPTIBLE)
+               rv_rtapp_block_atom_update(prev, SLEEP, true);
+       rv_rtapp_block_atom_update(next, SLEEP, false);
+}
+
+void rv_rtapp_block_atoms_fetch(struct task_struct *task, struct ltl_monitor 
*mon)
+{
+       rv_rtapp_block_atom_set(mon, RT, rt_task(task));
+       rv_rtapp_block_atom_set(mon, USER_TASK, !(task->flags & PF_KTHREAD));
+}
+
+void rv_rtapp_block_atoms_init(struct task_struct *task, struct ltl_monitor 
*mon)
+{
+       rv_rtapp_block_atom_set(mon, SLEEP, false);
+       rv_rtapp_block_atom_set(mon, DO_NANOSLEEP, false);
+       rv_rtapp_block_atom_set(mon, FUTEX_LOCK_WITH_PI, false);
+       rv_rtapp_block_atom_set(mon, WAKEUP_RT_TASK, false);
+       rv_rtapp_block_atom_set(mon, RT_MUTEX_WAKING_WAITER, false);
+       rv_rtapp_block_atom_set(mon, STOPPING_WOKEN_TASK, false);
+       rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_MIGRATION, false);
+       rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_RCU, false);
+}
+
+static void handle_rt_mutex_wake_waiter_begin(void *data, struct task_struct *task)
+{
+       rv_rtapp_block_atom_update(task, RT_MUTEX_WAKING_WAITER, true);
+}
+
+static void handle_rt_mutex_wake_waiter_end(void *data, struct task_struct *task)
+{
+       rv_rtapp_block_atom_update(task, RT_MUTEX_WAKING_WAITER, false);
+}
+
+static void handle_sched_kthread_stop(void *data, struct task_struct *task)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(current);
+       struct rtapp_block_data *data = rv_rtapp_block_get_data(mon);
+
+       data->stopping_task = task;
+}
+
+static void handle_sched_kthread_stop_ret(void *data, int ret)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(current);
+       struct rtapp_block_data *data = rv_rtapp_block_get_data(mon);
+
+       data->stopping_task = NULL;
+}
+
+static void handle_sched_wakeup(void *data, struct task_struct *task)
+{
+       struct ltl_monitor *mon = rv_rtapp_block_get_monitor(current);
+       struct rtapp_block_data *data = rv_rtapp_block_get_data(mon);
+
+       if (!in_task())
+               return;
+
+       if (this_cpu_read(hardirq_context))
+               return;
+
+       if (!rt_task(task))
+               return;
+
+       data->woken_task = task;
+
+       if (!strncmp(task->comm, "migration/", strlen("migration/")))
+               rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_MIGRATION, true);
+       if (!strcmp(task->comm, "rcu_preempt"))
+               rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_RCU, true);
+       if (data->stopping_task == data->woken_task)
+               rv_rtapp_block_atom_set(mon, STOPPING_WOKEN_TASK, true);
+
+       rv_rtapp_block_atom_update(current, WAKEUP_RT_TASK, true);
+
+       rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_MIGRATION, false);
+       rv_rtapp_block_atom_set(mon, WOKEN_TASK_IS_RCU, false);
+       rv_rtapp_block_atom_set(mon, STOPPING_WOKEN_TASK, false);
+       rv_rtapp_block_atom_update(current, WAKEUP_RT_TASK, false);
+}
+
+static int enable_rtapp_block(void)
+{
+       int ret;
+
+       ret = rv_rtapp_block_init(sizeof(struct rtapp_block_data));
+
+       if (ret)
+               return ret;
+
+       rv_attach_trace_probe("rtapp_block", sched_wakeup, handle_sched_wakeup);
+       rv_attach_trace_probe("rtapp_block", rt_mutex_wake_waiter_begin,
+                                            handle_rt_mutex_wake_waiter_begin);
+       rv_attach_trace_probe("rtapp_block", rt_mutex_wake_waiter_end,
+                                            handle_rt_mutex_wake_waiter_end);
+       rv_attach_trace_probe("rtapp_block", sched_kthread_stop, 
handle_sched_kthread_stop);
+       rv_attach_trace_probe("rtapp_block", sched_kthread_stop_ret, 
handle_sched_kthread_stop_ret);
+       rv_attach_trace_probe("rtapp_block", sys_enter, handle_sys_enter);
+       rv_attach_trace_probe("rtapp_block", sys_exit, handle_sys_exit);
+       rv_attach_trace_probe("rtapp_block", sched_switch, handle_sched_switch);
+
+       return 0;
+}
+
+static void disable_rtapp_block(void)
+{
+       rv_detach_trace_probe("rtapp_block", sched_wakeup, handle_sched_wakeup);
+       rv_detach_trace_probe("rtapp_block", rt_mutex_wake_waiter_begin,
+                                            handle_rt_mutex_wake_waiter_begin);
+       rv_detach_trace_probe("rtapp_block", rt_mutex_wake_waiter_end,
+                                            handle_rt_mutex_wake_waiter_end);
+       rv_detach_trace_probe("rtapp_block", sched_kthread_stop, 
handle_sched_kthread_stop);
+       rv_detach_trace_probe("rtapp_block", sched_kthread_stop_ret, 
handle_sched_kthread_stop_ret);
+       rv_detach_trace_probe("rtapp_block", sys_enter, handle_sys_enter);
+       rv_detach_trace_probe("rtapp_block", sys_exit, handle_sys_exit);
+       rv_detach_trace_probe("rtapp_block", sched_switch, handle_sched_switch);
+
+       rv_rtapp_block_destroy();
+}
+
+static struct rv_monitor rv_rtapp_block = {
+       .name = "rtapp_block",
+       .description = "Monitor that RT tasks are not blocked by non-RT tasks",
+       .enable = enable_rtapp_block,
+       .disable = disable_rtapp_block,
+};
+
+void rv_rtapp_block_error(struct task_struct *task, struct ltl_monitor *mon)
+{
+       struct rtapp_block_data *data = rv_rtapp_block_get_data(mon);
+       struct task_struct *woken = data->woken_task;
+
+       bool sleep = rv_rtapp_block_atom_get(mon, SLEEP);
+
+       if (sleep)
+               trace_rtapp_block_sleep_error(task);
+       else
+               trace_rtapp_block_wakeup_error(task, woken);
+
+#ifdef CONFIG_RV_REACTORS
+       if (!rv_rtapp_block.react)
+               return;
+
+       if (sleep) {
+               rv_rtapp_block.react("rv: %s[%d](RT) is blocked\n", task->comm, 
task->pid);
+       } else {
+               rv_rtapp_block.react("rv: %s[%d](RT) was blocked by %s[%d](non-RT)\n",
+                                       woken->comm, woken->pid,
+                                       task->comm, task->pid);
+       }
+#endif
+}
+
+static int __init register_rtapp_block(void)
+{
+       rv_register_monitor(&rv_rtapp_block);
+       return 0;
+}
+
+static void __exit unregister_rtapp_block(void)
+{
+       rv_unregister_monitor(&rv_rtapp_block);
+}
+
+module_init(register_rtapp_block);
+module_exit(unregister_rtapp_block);
diff --git a/kernel/trace/rv/rv_trace.h b/kernel/trace/rv/rv_trace.h
index 96264233cac5..79a7388b5c55 100644
--- a/kernel/trace/rv/rv_trace.h
+++ b/kernel/trace/rv/rv_trace.h
@@ -121,6 +121,50 @@ DECLARE_EVENT_CLASS(error_da_monitor_id,
 // Add new monitors based on CONFIG_DA_MON_EVENTS_ID here
 
 #endif /* CONFIG_DA_MON_EVENTS_ID */
+#ifdef CONFIG_RV_MON_RTAPP_BLOCK
+TRACE_EVENT(rtapp_block_wakeup_error,
+
+       TP_PROTO(struct task_struct *task, struct task_struct *woken),
+
+       TP_ARGS(task, woken),
+
+       TP_STRUCT__entry(
+               __string(comm, task->comm)
+               __string(woken_comm, woken->comm)
+               __field(pid_t, pid)
+               __field(pid_t, woken_pid)
+       ),
+
+       TP_fast_assign(
+               __assign_str(comm);
+               __assign_str(woken_comm);
+               __entry->pid = task->pid;
+               __entry->woken_pid = woken->pid;
+       ),
+
+       TP_printk("rv: %s[%d](RT) was blocked by %s[%d](non-RT)",
+                       __get_str(woken_comm), __entry->woken_pid,
+                       __get_str(comm), __entry->pid)
+);
+TRACE_EVENT(rtapp_block_sleep_error,
+
+       TP_PROTO(struct task_struct *task),
+
+       TP_ARGS(task),
+
+       TP_STRUCT__entry(
+               __string(comm, task->comm)
+               __field(pid_t, pid)
+       ),
+
+       TP_fast_assign(
+               __assign_str(comm);
+               __entry->pid = task->pid;
+       ),
+
+       TP_printk("rv: %s[%d](RT) is blocked", __get_str(comm), __entry->pid)
+);
+#endif
 #endif /* _TRACE_RV_H */
 
 /* This part must be outside protection */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af972a92d06..942318ef3f62 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1638,6 +1638,9 @@ config TRACE_IRQFLAGS
          Enables hooks to interrupt enabling and disabling for
          either tracing or lock debugging.
 
+config TRACE_RT_MUTEX_WAKE_WAITER
+       bool
+
 config TRACE_IRQFLAGS_NMI
        def_bool y
        depends on TRACE_IRQFLAGS
-- 
2.39.5


Reply via email to