Preliminary discussion: https://lkml.org/lkml/2020/5/13/1327
This patch avoids introducing arch-specific trace points by leveraging
the existing definition of the ipi_raise trace point.

Issues to address in potential future work:
- make the ipi reason available at the generic smp code level (possible?)
- addition of ipi_entry/ipi_exit tracepoints in generic smp code

Signed-off-by: Wojciech Kudla <wk.ker...@gmail.com>
---
 kernel/smp.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 7dbcb402c2fc..df6982a1d3f2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,6 +23,11 @@
 
 #include "smpboot.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
+static const char *ipi_reason_missing __tracepoint_string = "";
+
 enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
@@ -34,6 +39,7 @@ struct call_function_data {
        cpumask_var_t           cpumask_ipi;
 };
 
+
 static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
@@ -176,8 +182,12 @@ static int generic_exec_single(int cpu, call_single_data_t *csd,
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
-       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+               if (trace_ipi_raise_enabled())
+                       trace_ipi_raise(cpumask_of(cpu), ipi_reason_missing);
+
                arch_send_call_function_single_ipi(cpu);
+       }
 
        return 0;
 }
@@ -474,6 +484,8 @@ void smp_call_function_many(const struct cpumask *mask,
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }
 
+       trace_ipi_raise(cfd->cpumask_ipi, ipi_reason_missing);
+
        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
-- 
2.17.1

Reply via email to