Hi Peter,

On 10/04/2017 09:43 AM, Joel Fernandes wrote:
[..]
>>> + */
>>> +static DEFINE_PER_CPU(int, tracing_irq_cpu);
>>> +
>>>  #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
>>>  void trace_hardirqs_on(void)
>>>  {
>>> +     if (!this_cpu_read(tracing_irq_cpu))
>>> +             return;
>>> +
>>> +     trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>>>       tracer_hardirqs_on();
>>> +
>>> +     this_cpu_write(tracing_irq_cpu, 0);
>>>  }
>>>  EXPORT_SYMBOL(trace_hardirqs_on);
>>>
>>>  void trace_hardirqs_off(void)
>>>  {
>>> +     if (this_cpu_read(tracing_irq_cpu))
>>> +             return;
>>> +
>>> +     this_cpu_write(tracing_irq_cpu, 1);
>>> +
>>> +     trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>>>       tracer_hardirqs_off();
>>>  }
>>>  EXPORT_SYMBOL(trace_hardirqs_off);
>>>
>>>  __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
>>>  {
>>> +     if (!this_cpu_read(tracing_irq_cpu))
>>> +             return;
>>> +
>>> +     trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
>>>       tracer_hardirqs_on_caller(caller_addr);
>>> +
>>> +     this_cpu_write(tracing_irq_cpu, 0);
>>>  }
>>>  EXPORT_SYMBOL(trace_hardirqs_on_caller);
>>>
>>>  __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
>>>  {
>>> +     if (this_cpu_read(tracing_irq_cpu))
>>> +             return;
>>> +
>>> +     this_cpu_write(tracing_irq_cpu, 1);
>>> +
>>> +     trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
>>>       tracer_hardirqs_off_caller(caller_addr);
>>>  }
>>>  EXPORT_SYMBOL(trace_hardirqs_off_caller);
>>
>> lockdep implements the trace_hardirq_*() in terms of *_caller(). Would
>> that make sense here?
> 
> Yes, I think that makes sense and reduces code duplication; I can do
> this in the next rev.

I rewrote the patches to use standalone tracepoints that the various users
(lockdep and the *irqsoff tracers) register hooks onto. It looks much cleaner
now and gets rid of the time_* calls in lockdep as well. It also makes the
trace events available when PROVE_LOCKING is enabled, as you wanted. I still
have to test lockdep with it, but it builds fine, and I tested the
preemptirqsoff tracer and the standalone trace events.
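
In case it helps review, here is a minimal sketch (not part of the patch;
the probe and init function names are made up) of how a kernel user would
attach to the new tracepoints. This is essentially what lockdep_init() and
the tracer init functions in the patch below do:

  #include <linux/init.h>
  #include <trace/events/preemptirq.h>

  /*
   * Tracepoint probes take the registration cookie (void *data) first,
   * followed by the arguments declared in the tracepoint's TP_PROTO().
   */
  static void my_irq_disable_probe(void *data, unsigned long ip,
                                   unsigned long parent_ip)
  {
          /* called whenever hardirqs get turned off */
  }

  static int __init my_hooks_init(void)
  {
          return register_trace_irq_disable(my_irq_disable_probe, NULL);
  }
  core_initcall(my_hooks_init);

register_trace_irq_enable(), register_trace_preempt_disable() and
register_trace_preempt_enable() work the same way for the other three
events.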

---8<---
From: Joel Fernandes <joe...@google.com>
Subject: [PATCH RFC] tracing: Add preempt and irq disable/enable tracepoints

- add new preempt/irq disable/enable tracepoints
- convert lockdep to register hooks onto them
- convert the preempt/irqoff tracers to register hooks onto them
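
The resulting events show up under the preemptirq group like any other
trace event and can be enabled the usual way, e.g. (assuming tracefs is
mounted in the usual place):

  echo 1 > /sys/kernel/debug/tracing/events/preemptirq/irq_disable/enable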

Signed-off-by: Joel Fernandes <joe...@google.com>
---
 include/linux/ftrace.h            |  10 +--
 include/linux/irqflags.h          |   5 ++
 include/linux/lockdep.h           |   6 +-
 include/trace/events/preemptirq.h |  70 ++++++++++++++++++++
 init/main.c                       |   2 +-
 kernel/locking/lockdep.c          |  30 +++------
 kernel/sched/core.c               |   2 +-
 kernel/trace/Kconfig              |  25 +++++++
 kernel/trace/Makefile             |   1 +
 kernel/trace/trace_irqsoff.c      | 133 ++++++++++++++------------------------
 kernel/trace/trace_preemptirq.c   |  69 ++++++++++++++++++++
 11 files changed, 233 insertions(+), 120 deletions(-)
 create mode 100644 include/trace/events/preemptirq.h
 create mode 100644 kernel/trace/trace_preemptirq.c

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2e028854bac7..4d9b3dbfa494 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -734,15 +734,7 @@ static inline unsigned long get_lock_parent_ip(void)
        return CALLER_ADDR2;
 }
 
-#ifdef CONFIG_IRQSOFF_TRACER
-  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
-  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
-  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
-  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#ifdef CONFIG_PREEMPT_TRACER
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5dd1272d1ab2..0d323931def6 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,8 +15,13 @@
 #include <asm/irqflags.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
   extern void trace_softirqs_on(unsigned long ip);
   extern void trace_softirqs_off(unsigned long ip);
+#else
+  static inline void trace_softirqs_on(unsigned long ip) { }
+  static inline void trace_softirqs_off(unsigned long ip) { }
+#endif
   extern void trace_hardirqs_on(void);
   extern void trace_hardirqs_off(void);
 # define trace_hardirq_context(p)      ((p)->hardirq_context)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fffe49f188e6..6e7c71e546a3 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -263,7 +263,7 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_info(void);
+extern void lockdep_init(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
@@ -419,7 +419,7 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g)  do { } while (0)
 # define lockdep_clear_current_reclaim_state() do { } while (0)
 # define lockdep_trace_alloc(g)                        do { } while (0)
-# define lockdep_info()                                do { } while (0)
+# define lockdep_init()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
 # define lockdep_set_class(lock, key)          do { (void)(key); } while (0)
@@ -523,7 +523,7 @@ do {                                                       \
 
 #endif /* CONFIG_LOCKDEP */
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
 static inline void print_irqtrace_events(struct task_struct *curr)
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644
index 000000000000..f9a86f2d4802
--- /dev/null
+++ b/include/trace/events/preemptirq.h
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+       TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+       TP_ARGS(ip, parent_ip),
+
+       TP_STRUCT__entry(
+               __field(u32, caller_offs)
+               __field(u32, parent_offs)
+       ),
+
+       TP_fast_assign(
+               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+       ),
+
+       TP_printk("caller=%pF parent=%pF",
+                 (void *)((unsigned long)(_stext) + __entry->caller_offs),
+                 (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_EVENT(preemptirq_template, irq_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif /* CONFIG_PREEMPTIRQ_TRACEPOINTS */
diff --git a/init/main.c b/init/main.c
index 052481fbe363..9d8049bf448d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -632,7 +632,7 @@ asmlinkage __visible void __init start_kernel(void)
                panic("Too many boot %s vars at `%s'", panic_later,
                      panic_param);
 
-       lockdep_info();
+       lockdep_init();
 
        /*
         * Need to run this when irqs are enabled, because it wants
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7d2499bec5fe..bd0686665a4d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,6 +55,7 @@
 
 #include "lockdep_internals.h"
 
+#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
@@ -2710,10 +2711,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
        debug_atomic_inc(hardirqs_on_events);
 }
 
-__visible void trace_hardirqs_on_caller(unsigned long ip)
+static void lockdep_hardirqs_on(void *none, unsigned long ip,
+                               unsigned long ignore)
 {
-       time_hardirqs_on(CALLER_ADDR0, ip);
-
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
@@ -2752,23 +2752,15 @@ __visible void trace_hardirqs_on_caller(unsigned long ip)
        __trace_hardirqs_on_caller(ip);
        current->lockdep_recursion = 0;
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-void trace_hardirqs_on(void)
-{
-       trace_hardirqs_on_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-__visible void trace_hardirqs_off_caller(unsigned long ip)
+static void lockdep_hardirqs_off(void *none, unsigned long ip,
+                                unsigned long ignore)
 {
        struct task_struct *curr = current;
 
-       time_hardirqs_off(CALLER_ADDR0, ip);
-
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
@@ -2790,13 +2782,6 @@ __visible void trace_hardirqs_off_caller(unsigned long ip)
        } else
                debug_atomic_inc(redundant_hardirqs_off);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-void trace_hardirqs_off(void)
-{
-       trace_hardirqs_off_caller(CALLER_ADDR0);
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4281,8 +4266,11 @@ void lockdep_reset_lock(struct lockdep_map *lock)
        raw_local_irq_restore(flags);
 }
 
-void __init lockdep_info(void)
+void __init lockdep_init(void)
 {
+       register_trace_irq_disable(lockdep_hardirqs_off, NULL);
+       register_trace_irq_enable(lockdep_hardirqs_on, NULL);
+
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., 
Ingo Molnar\n");
 
        printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0869b20fba81..ee5514b97b03 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3037,7 +3037,7 @@ u64 scheduler_tick_max_deferment(void)
 #endif
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-                               defined(CONFIG_PREEMPT_TRACER))
+                               defined(CONFIG_TRACE_PREEMPT_TOGGLE))
 /*
  * If the value passed in is equal to the current preempt count
  * then we just disabled preemption. Start timing the latency.
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840e2d82..c4079931d5c1 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -159,6 +159,30 @@ config FUNCTION_GRAPH_TRACER
          the return value. This is done by setting the current return
          address on the current task structure into a stack of calls.
 
+config TRACE_PREEMPT_TOGGLE
+       bool
+       help
+         Enables hooks which will be called when preemption is first disabled,
+         and last enabled.
+
+config PREEMPTIRQ_TRACEPOINTS
+       # Used to create the preempt and irq tracepoints
+       bool
+       depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
+       default y
+       help
+         Create preempt/irq toggle tracepoints if needed, so that other parts
+         of the kernel can emit trace events from them or attach hooks to them.
+
+config PREEMPTIRQ_EVENTS
+       bool "Enable trace events for preempt and irq disable/enable"
+       select TRACE_IRQFLAGS
+       select TRACE_PREEMPT_TOGGLE if PREEMPT
+       help
+         Enable tracing of disable and enable events for preemption and irqs.
+         For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+         enabled. The irq disable/enable events are available whether or not
+         PROVE_LOCKING is enabled.
 
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
@@ -195,6 +219,7 @@ config PREEMPT_TRACER
        select RING_BUFFER_ALLOW_SWAP
        select TRACER_SNAPSHOT
        select TRACER_SNAPSHOT_PER_CPU_SWAP
+       select TRACE_PREEMPT_TOGGLE
        help
          This option measures the time spent in preemption-off critical
          sections, with microsecond accuracy.
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 90f2701d92a7..53f536f3a763 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7758bc0617cb..0c95a86d8541 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -13,7 +13,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
-
+#include <trace/events/preemptirq.h>
 #include "trace.h"
 
 static struct trace_array              *irqsoff_trace __read_mostly;
@@ -446,86 +446,6 @@ void stop_critical_timings(void)
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
 
-#ifdef CONFIG_IRQSOFF_TRACER
-#ifdef CONFIG_PROVE_LOCKING
-void time_hardirqs_on(unsigned long a0, unsigned long a1)
-{
-       if (!preempt_trace() && irq_trace())
-               stop_critical_timing(a0, a1);
-}
-
-void time_hardirqs_off(unsigned long a0, unsigned long a1)
-{
-       if (!preempt_trace() && irq_trace())
-               start_critical_timing(a0, a1);
-}
-
-#else /* !CONFIG_PROVE_LOCKING */
-
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
-/*
- * We are only interested in hardirq on/off events:
- */
-void trace_hardirqs_on(void)
-{
-       if (!preempt_trace() && irq_trace())
-               stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
-
-void trace_hardirqs_off(void)
-{
-       if (!preempt_trace() && irq_trace())
-               start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
-       if (!preempt_trace() && irq_trace())
-               stop_critical_timing(CALLER_ADDR0, caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
-       if (!preempt_trace() && irq_trace())
-               start_critical_timing(CALLER_ADDR0, caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-#endif /* CONFIG_PROVE_LOCKING */
-#endif /*  CONFIG_IRQSOFF_TRACER */
-
-#ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
-{
-       if (preempt_trace() && !irq_trace())
-               stop_critical_timing(a0, a1);
-}
-
-void trace_preempt_off(unsigned long a0, unsigned long a1)
-{
-       if (preempt_trace() && !irq_trace())
-               start_critical_timing(a0, a1);
-}
-#endif /* CONFIG_PREEMPT_TRACER */
-
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
 
@@ -675,10 +595,28 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * We are only interested in hardirq on/off events:
+ */
+static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
+{
+       if (!preempt_trace() && irq_trace())
+               stop_critical_timing(a0, a1);
+}
+
+static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
+{
+       if (!preempt_trace() && irq_trace())
+               start_critical_timing(a0, a1);
+}
+
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_IRQS_OFF;
 
+       register_trace_irq_disable(tracer_hardirqs_off, NULL);
+       register_trace_irq_enable(tracer_hardirqs_on, NULL);
+
        return __irqsoff_tracer_init(tr);
 }
 static struct tracer irqsoff_tracer __read_mostly =
@@ -701,15 +639,33 @@ static struct tracer irqsoff_tracer __read_mostly =
        .use_max_tr     = true,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
+
 #else
+static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
 # define register_irqsoff(trace) do { } while (0)
-#endif
+#endif /*  CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
+static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
+{
+       if (preempt_trace() && !irq_trace())
+               stop_critical_timing(a0, a1);
+}
+
+static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
+{
+       if (preempt_trace() && !irq_trace())
+               start_critical_timing(a0, a1);
+}
+
 static int preemptoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_PREEMPT_OFF;
 
+       register_trace_preempt_disable(tracer_preempt_off, NULL);
+       register_trace_preempt_enable(tracer_preempt_on, NULL);
+
        return __irqsoff_tracer_init(tr);
 }
 
@@ -733,17 +689,24 @@ static struct tracer preemptoff_tracer __read_mostly =
        .use_max_tr     = true,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
+
 #else
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 # define register_preemptoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_PREEMPT_TRACER */
 
-#if defined(CONFIG_IRQSOFF_TRACER) && \
-       defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 
 static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
+       register_trace_irq_disable(tracer_hardirqs_off, NULL);
+       register_trace_irq_enable(tracer_hardirqs_on, NULL);
+       register_trace_preempt_disable(tracer_preempt_off, NULL);
+       register_trace_preempt_enable(tracer_preempt_on, NULL);
+
        return __irqsoff_tracer_init(tr);
 }
 
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
new file mode 100644
index 000000000000..62d94b170978
--- /dev/null
+++ b/kernel/trace/trace_preemptirq.c
@@ -0,0 +1,69 @@
+/*
+ * preemptoff and irqoff tracepoints
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+void trace_hardirqs_on(void)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+       trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#ifdef CONFIG_DEBUG_PREEMPT
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_enable_rcuidle(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_disable_rcuidle(a0, a1);
+}
+#endif /* CONFIG_DEBUG_PREEMPT */
-- 
2.14.2.920.gcf0c67979c-goog