Now that we have full support from softirqs to perform per-vector masking, let's feed lockdep the proper inputs and push the vector numbers involved in a base softirq lock usage:
LOCK_ENABLED_SOFTIRQ: push local_softirq_enabled() LOCK_USED_IN_SOFTIRQ: push curr->softirq_context, modified by lockdep_softirq_enter/exit() Reviewed-by: David S. Miller <da...@davemloft.net> Signed-off-by: Frederic Weisbecker <frede...@kernel.org> Cc: Mauro Carvalho Chehab <mchehab+sams...@kernel.org> Cc: Joel Fernandes <j...@joelfernandes.org> Cc: Thomas Gleixner <t...@linutronix.de> Cc: Pavan Kondeti <pkond...@codeaurora.org> Cc: Paul E . McKenney <paul...@linux.vnet.ibm.com> Cc: David S . Miller <da...@davemloft.net> Cc: Ingo Molnar <mi...@kernel.org> Cc: Sebastian Andrzej Siewior <bige...@linutronix.de> Cc: Linus Torvalds <torva...@linux-foundation.org> Cc: Peter Zijlstra <pet...@infradead.org> --- kernel/locking/lockdep.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index aab634b07d67..4d38ff30006d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2879,6 +2879,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip) */ if (curr->softirqs_enabled) { usage.bit = LOCK_ENABLED_SOFTIRQ; + usage.vector = local_softirq_enabled(); if (!mark_held_locks(curr, &usage)) return; } @@ -2966,6 +2967,7 @@ void trace_softirqs_on(unsigned long ip) struct task_struct *curr = current; struct lock_usage usage = { .bit = LOCK_ENABLED_SOFTIRQ, + .vector = local_softirq_enabled() }; if (unlikely(!debug_locks || current->lockdep_recursion)) @@ -3030,7 +3032,7 @@ void trace_softirqs_off(unsigned long ip) static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) { - struct lock_usage usage = { .vector = 0 }; + struct lock_usage usage; /* * If non-trylock use in a hardirq or softirq context, then * mark the lock as used in these contexts: @@ -3039,22 +3041,26 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) if (hlock->read) { if (curr->hardirq_context) { usage.bit = LOCK_USED_IN_HARDIRQ_READ; + usage.vector = 0; if 
(!mark_lock(curr, hlock, &usage)) return 0; } if (curr->softirq_context) { usage.bit = LOCK_USED_IN_SOFTIRQ_READ; + usage.vector = curr->softirq_context; if (!mark_lock(curr, hlock, &usage)) return 0; } } else { if (curr->hardirq_context) { usage.bit = LOCK_USED_IN_HARDIRQ; + usage.vector = 0; if (!mark_lock(curr, hlock, &usage)) return 0; } if (curr->softirq_context) { usage.bit = LOCK_USED_IN_SOFTIRQ; + usage.vector = curr->softirq_context; if (!mark_lock(curr, hlock, &usage)) return 0; } @@ -3063,19 +3069,23 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) if (!hlock->hardirqs_off) { if (hlock->read) { usage.bit = LOCK_ENABLED_HARDIRQ_READ; + usage.vector = 0; if (!mark_lock(curr, hlock, &usage)) return 0; if (curr->softirqs_enabled) { usage.bit = LOCK_ENABLED_SOFTIRQ_READ; + usage.vector = local_softirq_enabled(); if (!mark_lock(curr, hlock, &usage)) return 0; } } else { usage.bit = LOCK_ENABLED_HARDIRQ; + usage.vector = 0; if (!mark_lock(curr, hlock, &usage)) return 0; if (curr->softirqs_enabled) { usage.bit = LOCK_ENABLED_SOFTIRQ; + usage.vector = local_softirq_enabled(); if (!mark_lock(curr, hlock, &usage)) return 0; } -- 2.21.0