As mentioned in a previous patch, it's possible to implement the
handoff logic differently, avoiding the issue where we 'leak' a HANDOFF
flag.

This patch does so, just to show what it looks like; I'm not at all
convinced this is worth it.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 kernel/locking/mutex.c |   34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -71,6 +71,32 @@ static inline unsigned long __owner_flag
        return owner & MUTEX_FLAGS;
 }
 
+static inline bool __mutex_req_handoff(struct mutex *lock)
+{
+       unsigned long owner, curr = (unsigned long)current;
+
+       owner = atomic_long_read(&lock->owner);
+       for (;;) {
+               unsigned long old, new;
+
+               if (owner & MUTEX_FLAG_HANDOFF)
+                       return false;
+
+               if (__owner_task(owner))
+                       new = owner | MUTEX_FLAG_HANDOFF;
+               else
+                       new = curr | __owner_flags(owner);
+
+               old = atomic_long_cmpxchg_acquire(&lock->owner, owner, new);
+               if (old == owner)
+                       break;
+
+               owner = old;
+       }
+
+       return !__owner_task(owner);
+}
+
 /*
  * Actual trylock that will work on any unlocked state.
  *
@@ -609,7 +635,6 @@ __mutex_lock_common(struct mutex *lock,
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
-       bool first = false;
        struct ww_mutex *ww;
        int ret;
 
@@ -676,8 +701,9 @@ __mutex_lock_common(struct mutex *lock,
                schedule_preempt_disabled();
 
                if (__mutex_waiter_is_first(lock, &waiter)) {
-                       first = true;
-                       __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+                       if (__mutex_req_handoff(lock))
+                               break;
+
                        if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true))
                                break;
                }
@@ -700,8 +726,6 @@ __mutex_lock_common(struct mutex *lock,
        mutex_remove_waiter(lock, &waiter, task);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);
-       else if (first && (atomic_long_read(&lock->owner) & MUTEX_FLAG_HANDOFF))
-               __mutex_clear_flag(lock, MUTEX_FLAG_HANDOFF);
 
        debug_mutex_free_waiter(&waiter);
 


Reply via email to