The lockdep tracepoints are under the lockdep recursion counter, this
has a bunch of nasty side effects:

 - TRACE_IRQFLAGS doesn't work across the entire tracepoint, leading to
   all sorts of dodgy complaints.

 - RCU-lockdep doesn't see the tracepoints either, hiding numerous
   "suspicious RCU usage" warnings.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/locking/lockdep.c |   27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5000,8 +5000,9 @@ void lock_acquire(struct lockdep_map *lo
        raw_local_irq_save(flags);
        check_flags(flags);
 
-       current->lockdep_recursion++;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
+       current->lockdep_recursion++;
        __lock_acquire(lock, subclass, trylock, read, check,
                       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        lockdep_recursion_finish();
@@ -5016,10 +5017,13 @@ void lock_release(struct lockdep_map *lo
        if (unlikely(current->lockdep_recursion))
                return;
 
+
        raw_local_irq_save(flags);
        check_flags(flags);
-       current->lockdep_recursion++;
+
        trace_lock_release(lock, ip);
+
+       current->lockdep_recursion++;
        if (__lock_release(lock, ip))
                check_chain_key(current);
        lockdep_recursion_finish();
@@ -5171,7 +5175,7 @@ __lock_contended(struct lockdep_map *loc
                stats->bounces[bounce_contended + !!hlock->read]++;
 }
 
-static void
+static bool
 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        struct task_struct *curr = current;
@@ -5187,16 +5191,16 @@ __lock_acquired(struct lockdep_map *lock
         * acquire, how the heck did that happen?
         */
        if (DEBUG_LOCKS_WARN_ON(!depth))
-               return;
+               return false;
 
        hlock = find_held_lock(curr, lock, depth, &i);
        if (!hlock) {
                print_lock_contention_bug(curr, lock, _RET_IP_);
-               return;
+               return false;
        }
 
        if (hlock->instance != lock)
-               return;
+               return false;
 
        cpu = smp_processor_id();
        if (hlock->waittime_stamp) {
@@ -5205,8 +5209,6 @@ __lock_acquired(struct lockdep_map *lock
                hlock->holdtime_stamp = now;
        }
 
-       trace_lock_acquired(lock, ip);
-
        stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
                if (hlock->read)
@@ -5219,6 +5221,8 @@ __lock_acquired(struct lockdep_map *lock
 
        lock->cpu = cpu;
        lock->ip = ip;
+
+       return true;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -5244,6 +5248,7 @@ EXPORT_SYMBOL_GPL(lock_contended);
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
+       bool trace;
 
        if (unlikely(!lock_stat || !debug_locks))
                return;
@@ -5254,8 +5259,12 @@ void lock_acquired(struct lockdep_map *l
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion++;
-       __lock_acquired(lock, ip);
+       trace = __lock_acquired(lock, ip);
        lockdep_recursion_finish();
+
+       if (trace)
+               trace_lock_acquired(lock, ip);
+
        raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_acquired);


Reply via email to