The existing version inserts a full barrier via smp_mb__before_atomic(),
while unlock() only requires release semantics. Use
atomic_sub_return_release() instead.

Suggested-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Pan Xinhui <[email protected]>
---
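For reference, the ordering change can be sketched in C11 atomics terms
(illustrative only; struct qspinlock and _Q_LOCKED_VAL below are
simplified stand-ins for the kernel definitions, not kernel code):

	#include <stdatomic.h>

	struct qspinlock { atomic_int val; };	/* stand-in */
	#define _Q_LOCKED_VAL 1			/* stand-in */

	/* Old sequence: a full fence followed by an unordered RMW. */
	static void unlock_old(struct qspinlock *lock)
	{
		/* ~ smp_mb__before_atomic() */
		atomic_thread_fence(memory_order_seq_cst);
		/* ~ atomic_sub(), which has no ordering of its own */
		atomic_fetch_sub_explicit(&lock->val, _Q_LOCKED_VAL,
					  memory_order_relaxed);
	}

	/* New sequence: the RMW itself carries release ordering, so
	 * critical-section accesses cannot be reordered past the
	 * store that releases the lock. */
	static void unlock_new(struct qspinlock *lock)
	{
		(void)atomic_fetch_sub_explicit(&lock->val, _Q_LOCKED_VAL,
						memory_order_release);
	}
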
 include/asm-generic/qspinlock.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 35a52a8..8947cd2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -92,10 +92,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 {
        /*
-        * smp_mb__before_atomic() in order to guarantee release semantics
-        */
-       smp_mb__before_atomic();
-       atomic_sub(_Q_LOCKED_VAL, &lock->val);
+        * unlock() needs release semantics
+        */
+       (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
 
-- 
1.9.1
