From: Peter Zijlstra <pet...@infradead.org>

In preparation for nesting mutex::wait_lock under rq::lock, we need to remove
wakeups from under it.
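
The idea is to collect wakeups on a wake_q while wait_lock is held and only
issue them once the lock has been dropped. A minimal sketch of that deferral
pattern (illustrative only, not the exact hunks below; 'waiter' stands in for
whichever task was selected for wakeup):

	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock(&lock->wait_lock);
	/* pick tasks to wake while wait_lock is held, but defer the wakeup */
	wake_q_add(&wake_q, waiter->task);	/* instead of wake_up_process() */
	raw_spin_unlock(&lock->wait_lock);

	/* issue the deferred wakeups only after wait_lock is dropped */
	wake_up_q(&wake_q);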

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
[Heavily changed after 55f036ca7e74 ("locking: WW mutex cleanup") and
08295b3b5bee ("locking: Implement an algorithm choice for Wound-Wait
mutexes")]
Signed-off-by: Juri Lelli <juri.le...@redhat.com>
---
 kernel/locking/mutex.c | 43 +++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index df34ce70fcde..f37402cd8496 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -338,7 +338,7 @@ __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
  */
 static bool __sched
 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
-              struct ww_acquire_ctx *ww_ctx)
+              struct ww_acquire_ctx *ww_ctx, struct wake_q_head *wake_q)
 {
        if (!ww_ctx->is_wait_die)
                return false;
@@ -346,7 +346,7 @@ __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
        if (waiter->ww_ctx->acquired > 0 &&
                        __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
                debug_mutex_wake_waiter(lock, waiter);
-               wake_up_process(waiter->task);
+               wake_q_add(wake_q, waiter->task); // XXX
        }
 
        return true;
@@ -361,7 +361,8 @@ __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
  */
 static bool __ww_mutex_wound(struct mutex *lock,
                             struct ww_acquire_ctx *ww_ctx,
-                            struct ww_acquire_ctx *hold_ctx)
+                            struct ww_acquire_ctx *hold_ctx,
+                            struct wake_q_head *wake_q)
 {
        struct task_struct *owner = __mutex_owner(lock);
 
@@ -393,7 +394,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
-                       wake_up_process(owner);
+                       wake_q_add(wake_q, owner); // XXX
 
                return true;
        }
@@ -414,7 +415,9 @@ static bool __ww_mutex_wound(struct mutex *lock,
  * The current task must not be on the wait list.
  */
 static void __sched
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct mutex *lock,
+                        struct ww_acquire_ctx *ww_ctx,
+                        struct wake_q_head *wake_q)
 {
        struct mutex_waiter *cur;
 
@@ -424,8 +427,8 @@ __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
                if (!cur->ww_ctx)
                        continue;
 
-               if (__ww_mutex_die(lock, cur, ww_ctx) ||
-                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+               if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
+                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
                        break;
        }
 }
@@ -437,6 +440,8 @@ __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 static __always_inline void
 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+       DEFINE_WAKE_Q(wake_q);
+
        ww_mutex_lock_acquired(lock, ctx);
 
        /*
@@ -465,8 +470,10 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
         * die or wound us.
         */
        raw_spin_lock(&lock->base.wait_lock);
-       __ww_mutex_check_waiters(&lock->base, ctx);
+       __ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
        raw_spin_unlock(&lock->base.wait_lock);
+
+       wake_up_q(&wake_q);
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -824,7 +831,8 @@ __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
 static inline int __sched
 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
-                     struct ww_acquire_ctx *ww_ctx)
+                     struct ww_acquire_ctx *ww_ctx,
+                     struct wake_q_head *wake_q)
 {
        struct mutex_waiter *cur;
        struct list_head *pos;
@@ -868,7 +876,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                pos = &cur->list;
 
                /* Wait-Die: ensure younger waiters die. */
-               __ww_mutex_die(lock, cur, ww_ctx);
+               __ww_mutex_die(lock, cur, ww_ctx, wake_q);
        }
 
        __mutex_add_waiter(lock, waiter, pos);
@@ -886,7 +894,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                 * such that either we or the fastpath will wound @ww->ctx.
                 */
                smp_mb();
-               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
+               __ww_mutex_wound(lock, ww_ctx, ww->ctx, wake_q);
        }
 
        return 0;
@@ -900,6 +908,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
+       DEFINE_WAKE_Q(wake_q);
        struct mutex_waiter waiter;
        bool first = false;
        struct ww_mutex *ww;
@@ -940,7 +949,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         */
        if (__mutex_trylock(lock)) {
                if (use_ww_ctx && ww_ctx)
-                       __ww_mutex_check_waiters(lock, ww_ctx);
+                       __ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
 
                goto skip_wait;
        }
@@ -962,7 +971,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * Add in stamp order, waking up waiters that must kill
                 * themselves.
                 */
-               ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+               ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
                if (ret)
                        goto err_early_kill;
 
@@ -1034,7 +1043,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 */
                if (!ww_ctx->is_wait_die &&
                    !__mutex_waiter_is_first(lock, &waiter))
-                       __ww_mutex_check_waiters(lock, ww_ctx);
+                       __ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
        }
 
        mutex_remove_waiter(lock, &waiter, current);
@@ -1051,6 +1060,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                ww_mutex_lock_acquired(ww, ww_ctx);
 
        raw_spin_unlock(&lock->wait_lock);
+       wake_up_q(&wake_q);
        preempt_enable();
        return 0;
 
@@ -1061,6 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
+       wake_up_q(&wake_q);
        preempt_enable();
        return ret;
 }
@@ -1244,9 +1255,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
        if (owner & MUTEX_FLAG_HANDOFF)
                __mutex_handoff(lock, next);
 
+       preempt_disable(); // XXX unlock->wakeup inversion like
        raw_spin_unlock(&lock->wait_lock);
 
-       wake_up_q(&wake_q);
+       wake_up_q(&wake_q); // XXX must force resched on proxy
+       preempt_enable();
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
-- 
2.17.1
