On Tue, Oct 27, 2020 at 01:30:56PM +0100, Peter Zijlstra wrote:
> This seems to make it happy. Not quite sure that's the best solution.
> 
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 3e99dfef8408..81295bc760fe 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -4411,7 +4405,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
>               break;
>  
>       case LOCK_USED:
> -             debug_atomic_dec(nr_unused_locks);
> +     case LOCK_USED_READ:
> +             if ((hlock_class(this)->usage_mask & (LOCKF_USED|LOCKF_USED_READ)) == new_mask)
> +                     debug_atomic_dec(nr_unused_locks);
>               break;
>  
>       default:

This also works, and I think I like it better... anyone?

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..e603e86c0227 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4396,6 +4390,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        if (unlikely(hlock_class(this)->usage_mask & new_mask))
                goto unlock;
 
+       if (!hlock_class(this)->usage_mask)
+               debug_atomic_dec(nr_unused_locks);
+
        hlock_class(this)->usage_mask |= new_mask;
 
        if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4400,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
                        return 0;
        }
 
-       switch (new_bit) {
-       case 0 ... LOCK_USED-1:
+       if (new_bit < LOCK_USED) {
                ret = mark_lock_irq(curr, this, new_bit);
                if (!ret)
                        return 0;
-               break;
-
-       case LOCK_USED:
-               debug_atomic_dec(nr_unused_locks);
-               break;
-
-       default:
-               break;
        }
 
 unlock:
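
For anyone following along, here is a minimal userspace sketch of the
accounting idea in the second version: a class counts as unused until its
first usage bit is set, so nr_unused_locks is decremented exactly on the
0 -> nonzero transition of usage_mask, instead of special-casing
LOCK_USED/LOCK_USED_READ. The struct, the counter seeding, and main()
below are hypothetical stand-ins, not the real lockdep code.

#include <assert.h>

/* Hypothetical stand-in for lockdep's lock_class; only usage_mask matters. */
struct lock_class {
	unsigned long usage_mask;
};

static int nr_unused_locks;

static void mark_usage(struct lock_class *class, unsigned long new_mask)
{
	if (class->usage_mask & new_mask)	/* bit already set: nothing to do */
		return;

	if (!class->usage_mask)			/* first bit ever: class is no longer unused */
		nr_unused_locks--;

	class->usage_mask |= new_mask;
}

int main(void)
{
	struct lock_class c = { 0 };

	nr_unused_locks = 1;			/* one registered, never-used class */
	mark_usage(&c, 1UL << 0);		/* first usage bit: counter drops */
	mark_usage(&c, 1UL << 1);		/* second bit: no double decrement */
	assert(nr_unused_locks == 0);
	return 0;
}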

