This commit adds task isolation hooks as follows:

- __handle_domain_irq() generates an isolation warning for the
  local task

- irq_work_queue_on() generates an isolation warning for the remote
  task being interrupted for irq_work

- generic_exec_single() generates a remote isolation warning for
  the remote cpu being IPI'd

- smp_call_function_many() generates a remote isolation warning for
  the set of remote cpus being IPI'd

Calls to task_isolation_remote() or task_isolation_interrupt() can
be placed in the platform-independent code like this when doing so
results in fewer lines of code changes, as for example is true of
the users of the arch_send_call_function_*() APIs.  Or, they can
be placed in the per-architecture code when there are many callers,
as for example is true of the smp_send_reschedule() call.

A further cleanup might be to create an intermediate layer, so that
for example smp_send_reschedule() is a single generic function that
just calls arch_smp_send_reschedule(), allowing generic code to be
called every time smp_send_reschedule() is invoked.  But for now,
we just update either callers or callees as makes most sense.

Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
---
 kernel/irq/irqdesc.c | 5 +++++
 kernel/irq_work.c    | 5 ++++-
 kernel/smp.c         | 6 +++++-
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 82afb7ed369f..1b114c6b7ab8 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -16,6 +16,7 @@
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
 #include <linux/sysfs.h>
+#include <linux/isolation.h>
 
 #include "internals.h"
 
@@ -633,6 +634,10 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                irq = irq_find_mapping(domain, hwirq);
 #endif
 
+       task_isolation_interrupt((irq == hwirq) ?
+                                "irq %d (%s)" : "irq %d (%s hwirq %d)",
+                                irq, domain ? domain->name : "", hwirq);
+
        /*
         * Some hardware gives randomly wrong interrupts.  Rather
         * than crashing, do something sensible.
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..cde49f1f31f7 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/isolation.h>
 #include <asm/processor.h>
 
 
@@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
        if (!irq_work_claim(work))
                return false;
 
-       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+               task_isolation_remote(cpu, "irq_work");
                arch_send_call_function_single_ipi(cpu);
+       }
 
        return true;
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index c94dd85c8d41..44252aa650ac 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/sched/idle.h>
 #include <linux/hypervisor.h>
+#include <linux/isolation.h>
 
 #include "smpboot.h"
 
@@ -175,8 +176,10 @@ static int generic_exec_single(int cpu, call_single_data_t *csd,
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
-       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+               task_isolation_remote(cpu, "IPI function");
                arch_send_call_function_single_ipi(cpu);
+       }
 
        return 0;
 }
@@ -458,6 +461,7 @@ void smp_call_function_many(const struct cpumask *mask,
        }
 
        /* Send a message to all CPUs in the map */
+       task_isolation_remote_cpumask(cfd->cpumask_ipi, "IPI function");
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
        if (wait) {
-- 
2.1.2

Reply via email to