There is no reason not to always, accurately, track IRQ state.

This change also makes IRQ state tracking ignore lockdep_off().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/lockdep.c |   44 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3646,7 +3646,16 @@ static void __trace_hardirqs_on_caller(v
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-       if (unlikely(!debug_locks || current->lockdep_recursion))
+       if (unlikely(!debug_locks))
+               return;
+
+       /*
+        * NMIs do not (and cannot) track lock dependencies, nothing to do.
+        */
+       if (unlikely(in_nmi()))
+               return;
+
+       if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
                return;
 
        if (unlikely(current->hardirqs_enabled)) {
@@ -3692,7 +3701,27 @@ void noinstr lockdep_hardirqs_on(unsigne
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks || curr->lockdep_recursion))
+       if (unlikely(!debug_locks))
+               return;
+
+       /*
+        * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+        * tracking state and hardware state are out of sync.
+        *
+        * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+        * and not rely on hardware state like normal interrupts.
+        */
+       if (unlikely(in_nmi())) {
+               /*
+                * Skip:
+                *  - recursion check, because NMI can hit lockdep;
+                *  - hardware state check, because above;
+                *  - chain_key check, see lockdep_hardirqs_on_prepare().
+                */
+               goto skip_checks;
+       }
+
+       if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
                return;
 
        if (curr->hardirqs_enabled) {
@@ -3720,6 +3749,7 @@ void noinstr lockdep_hardirqs_on(unsigne
        DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
                            current->curr_chain_key);
 
+skip_checks:
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
        curr->hardirq_enable_ip = ip;
@@ -3735,7 +3765,15 @@ void noinstr lockdep_hardirqs_off(unsign
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks || curr->lockdep_recursion))
+       if (unlikely(!debug_locks))
+               return;
+
+       /*
+        * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+        * they will restore the software state. This ensures the software
+        * state is consistent inside NMIs as well.
+        */
+       if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
                return;
 
        /*


Reply via email to