>  __visible __used noinline
> @@ -730,6 +744,23 @@ __mutex_unlock_common_slowpath(struct mutex *lock, int 
> nested)
>       if (__mutex_slowpath_needs_to_unlock())
>               atomic_set(&lock->count, 1);
>  
> +/*
> + * Skipping the mutex_has_owner() check when DEBUG allows us to
> + * avoid taking the wait_lock, since we do not call mutex_release()
> + * and debug_mutex_unlock() when !DEBUG. This can otherwise result in
> + * deadlocks when another task enters the lock's slowpath in mutex_lock().
> + */
> +#ifndef CONFIG_DEBUG_MUTEXES
> +     /*
> +      * Abort the wakeup operation if there is another mutex owner, as the
> +      * lock was stolen. mutex_unlock() should have cleared the owner field
> +      * before calling this function. If that field is now set, another task
> +      * must have acquired the mutex.
> +      */
> +     if (mutex_has_owner(lock))
> +             return;

Would we need the mutex lock count to eventually get set to a negative
value if there are waiters? An optimistic spinner can get the lock and
set lock->count to 0. Then the lock count might remain 0 since a waiter
might not get woken up here to try-lock and set lock->count to -1 if it
goes back to sleep in the lock path.


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to