From: Peter Zijlstra <pet...@infradead.org>

Force inlining and prevent instrumentation of all sorts by marking the
functions which are invoked from low level entry code with 'noinstr'.

Split the irqflags tracking into two parts. One which does the heavy
lifting while RCU is watching and the final one which can be invoked after
RCU is turned off.

Signed-off-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
 include/linux/irqflags.h        |    2 +
 include/linux/sched.h           |    1 
 kernel/locking/lockdep.c        |   70 ++++++++++++++++++++++++++++++----------
 kernel/trace/trace_preemptirq.c |    2 +
 lib/debug_locks.c               |    2 -
 5 files changed, 59 insertions(+), 18 deletions(-)

--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -19,11 +19,13 @@
 #ifdef CONFIG_PROVE_LOCKING
   extern void lockdep_softirqs_on(unsigned long ip);
   extern void lockdep_softirqs_off(unsigned long ip);
+  extern void lockdep_hardirqs_on_prepare(unsigned long ip);
   extern void lockdep_hardirqs_on(unsigned long ip);
   extern void lockdep_hardirqs_off(unsigned long ip);
 #else
   static inline void lockdep_softirqs_on(unsigned long ip) { }
   static inline void lockdep_softirqs_off(unsigned long ip) { }
+  static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
   static inline void lockdep_hardirqs_on(unsigned long ip) { }
   static inline void lockdep_hardirqs_off(unsigned long ip) { }
 #endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -983,6 +983,7 @@ struct task_struct {
        unsigned int                    hardirq_disable_event;
        int                             hardirqs_enabled;
        int                             hardirq_context;
+       u64                             hardirq_chain_key;
        unsigned long                   softirq_disable_ip;
        unsigned long                   softirq_enable_ip;
        unsigned int                    softirq_disable_event;
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3639,9 +3639,6 @@ static void __trace_hardirqs_on_caller(u
 {
        struct task_struct *curr = current;
 
-       /* we'll do an OFF -> ON transition: */
-       curr->hardirqs_enabled = 1;
-
        /*
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
@@ -3653,16 +3650,13 @@ static void __trace_hardirqs_on_caller(u
         * bit for all held locks. (disabled hardirqs prevented
         * this bit from being set before)
         */
-       if (curr->softirqs_enabled)
+       if (curr->softirqs_enabled) {
                if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
                        return;
-
-       curr->hardirq_enable_ip = ip;
-       curr->hardirq_enable_event = ++curr->irq_events;
-       debug_atomic_inc(hardirqs_on_events);
+       }
 }
 
-void lockdep_hardirqs_on(unsigned long ip)
+void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -3698,20 +3692,62 @@ void lockdep_hardirqs_on(unsigned long i
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
 
+       current->hardirq_chain_key = current->curr_chain_key;
+
        current->lockdep_recursion++;
        __trace_hardirqs_on_caller(ip);
        lockdep_recursion_finish();
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+
+void noinstr lockdep_hardirqs_on(unsigned long ip)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
+               return;
+
+       if (curr->hardirqs_enabled) {
+               /*
+                * Neither irq nor preemption are disabled here
+                * so this is racy by nature but losing one hit
+                * in a stat is not a big deal.
+                */
+               __debug_atomic_inc(redundant_hardirqs_on);
+               return;
+       }
+
+       /*
+        * We're enabling irqs and according to our state above irqs weren't
+        * already enabled, yet we find the hardware thinks they are in fact
+        * enabled.. someone messed up their IRQ state tracing.
+        */
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return;
+
+       /*
+        * Ensure the lock stack remained unchanged between
+        * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
+        */
+       DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
+                           current->curr_chain_key);
+
+       /* we'll do an OFF -> ON transition: */
+       curr->hardirqs_enabled = 1;
+       curr->hardirq_enable_ip = ip;
+       curr->hardirq_enable_event = ++curr->irq_events;
+       debug_atomic_inc(hardirqs_on_events);
+}
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void lockdep_hardirqs_off(unsigned long ip)
+void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks || current->lockdep_recursion))
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
                return;
 
        /*
@@ -3732,7 +3768,7 @@ void lockdep_hardirqs_off(unsigned long
        } else
                debug_atomic_inc(redundant_hardirqs_off);
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_off);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4408,8 +4444,8 @@ static void print_unlock_imbalance_bug(s
        dump_stack();
 }
 
-static int match_held_lock(const struct held_lock *hlock,
-                                       const struct lockdep_map *lock)
+static noinstr int match_held_lock(const struct held_lock *hlock,
+                                  const struct lockdep_map *lock)
 {
        if (hlock->instance == lock)
                return 1;
@@ -4696,7 +4732,7 @@ static int
        return 0;
 }
 
-static nokprobe_inline
+static __always_inline
 int __lock_is_held(const struct lockdep_map *lock, int read)
 {
        struct task_struct *curr = current;
@@ -4956,7 +4992,7 @@ void lock_release(struct lockdep_map *lo
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(const struct lockdep_map *lock, int read)
+noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
        unsigned long flags;
        int ret = 0;
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -46,6 +46,7 @@ void trace_hardirqs_on(void)
                this_cpu_write(tracing_irq_cpu, 0);
        }
 
+       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
@@ -93,6 +94,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_off);
                this_cpu_write(tracing_irq_cpu, 0);
        }
 
+       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
 /*
  * Generic 'turn off all lock debugging' function:
  */
-int debug_locks_off(void)
+noinstr int debug_locks_off(void)
 {
        if (debug_locks && __debug_locks_off()) {
                if (!debug_locks_silent) {
