Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL)
such that we lose the indent. This also results in a more natural code
flow IMO.
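
For illustration, the shape of the transformation is the one below; this
is a standalone sketch with hypothetical masks and stub helpers
(LOCKED_MASK, PENDING_MASK, undo_pending(), take_lock(), enqueue()),
not the actual qspinlock code:

  /* Hypothetical stand-ins for the qspinlock masks and helpers. */
  #define LOCKED_MASK	0x1U	/* stands in for _Q_LOCKED_MASK */
  #define PENDING_MASK	0x2U	/* stands in for _Q_PENDING_MASK */

  static void undo_pending(void) { }	/* stub for clear_pending() */
  static void take_lock(void)    { }	/* stub for clear_pending_set_locked() */
  static void enqueue(void)      { }	/* stub for the MCS queue path */

  static void slowpath(unsigned int val)
  {
  	/* Contended case first: undo and jump away ... */
  	if (val & ~LOCKED_MASK) {
  		if (!(val & PENDING_MASK))
  			undo_pending();
  		goto queue;
  	}

  	/* ... so the common pending path runs un-indented. */
  	take_lock();
  	return;

  queue:
  	enqueue();
  }

  int main(void)
  {
  	slowpath(0x1U);	/* only the locked bit set: fast path */
  	slowpath(0x4U);	/* tail bits set: undo pending and queue */
  	return 0;
  }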

Cc: mi...@kernel.org
Cc: t...@linutronix.de
Cc: long...@redhat.com
Cc: andrea.pa...@amarulasolutions.com
Acked-by: Will Deacon <will.dea...@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/locking/qspinlock.c |   56 +++++++++++++++++++++------------------------
 1 file changed, 27 insertions(+), 29 deletions(-)

--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -330,39 +330,37 @@ void queued_spin_lock_slowpath(struct qs
         * 0,0,1 -> 0,1,1 ; pending
         */
        val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-       if (!(val & ~_Q_LOCKED_MASK)) {
-               /*
-                * We're pending, wait for the owner to go away.
-                *
-                * *,1,1 -> *,1,0
-                *
-                * this wait loop must be a load-acquire such that we match the
-                * store-release that clears the locked bit and create lock
-                * sequentiality; this is because not all
-                * clear_pending_set_locked() implementations imply full
-                * barriers.
-                */
-               if (val & _Q_LOCKED_MASK) {
-                       atomic_cond_read_acquire(&lock->val,
-                                                !(VAL & _Q_LOCKED_MASK));
-               }
-
-               /*
-                * take ownership and clear the pending bit.
-                *
-                * *,1,0 -> *,0,1
-                */
-               clear_pending_set_locked(lock);
-               qstat_inc(qstat_lock_pending, true);
-               return;
+       /*
+        * If we observe any contention, undo and queue.
+        */
+       if (unlikely(val & ~_Q_LOCKED_MASK)) {
+               if (!(val & _Q_PENDING_MASK))
+                       clear_pending(lock);
+               goto queue;
        }
 
        /*
-        * If pending was clear but there are waiters in the queue, then
-        * we need to undo our setting of pending before we queue ourselves.
+        * We're pending, wait for the owner to go away.
+        *
+        * 0,1,1 -> 0,1,0
+        *
+        * this wait loop must be a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because not all
+        * clear_pending_set_locked() implementations imply full
+        * barriers.
+        */
+       if (val & _Q_LOCKED_MASK)
+               atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+       /*
+        * take ownership and clear the pending bit.
+        *
+        * 0,1,0 -> 0,0,1
         */
-       if (!(val & _Q_PENDING_MASK))
-               clear_pending(lock);
+       clear_pending_set_locked(lock);
+       qstat_inc(qstat_lock_pending, true);
+       return;
 
        /*
         * End of pending bit optimistic spinning and beginning of MCS

