From: Waiman Long <waiman.l...@hpe.com>

Remove redundant ww_mutex code in __mutex_lock_common(): the
ww_mutex container pointer was computed with container_of() in three
separate places. Hoist it into a single function-scope variable,
computed once on entry, and reuse it in the fast- and slow-path
context setters. (The variable is only dereferenced under
"if (use_ww_ctx)" guards, so it is never read uninitialized.)

Cc: Ingo Molnar <mi...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Tim Chen <tim.c.c...@linux.intel.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Imre Deak <imre.d...@intel.com>
Cc: Jason Low <jason.l...@hpe.com>
Cc: "Paul E. McKenney" <paul...@us.ibm.com>
Cc: Ding Tianhong <dingtianh...@huawei.com>
Cc: Davidlohr Bueso <d...@stgolabs.net>
Cc: Will Deacon <will.dea...@arm.com>
Signed-off-by: Waiman Long <waiman.l...@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/locking/mutex.c |   13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -580,10 +580,11 @@ __mutex_lock_common(struct mutex *lock,
        struct mutex_waiter waiter;
        unsigned long flags;
        bool first = false;
+       struct ww_mutex *ww;
        int ret;
 
        if (use_ww_ctx) {
-               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+               ww = container_of(lock, struct ww_mutex, base);
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
        }
@@ -595,12 +596,8 @@ __mutex_lock_common(struct mutex *lock,
            mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
-               if (use_ww_ctx) {
-                       struct ww_mutex *ww;
-                       ww = container_of(lock, struct ww_mutex, base);
-
+               if (use_ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
-               }
                preempt_enable();
                return 0;
        }
@@ -680,10 +677,8 @@ __mutex_lock_common(struct mutex *lock,
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
 
-       if (use_ww_ctx) {
-               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+       if (use_ww_ctx)
                ww_mutex_set_context_slowpath(ww, ww_ctx);
-       }
 
        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable();


Reply via email to