The osq_lock() and osq_unlock() functions may not provide the necessary
acquire and release barriers in some cases. This patch makes sure
that the proper barriers are provided when osq_lock() is successful
or when osq_unlock() is called.

Signed-off-by: Waiman Long <waiman.l...@hpe.com>
---
 kernel/locking/osq_lock.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a3785..7dd4ee5 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -115,7 +115,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
         * cmpxchg in an attempt to undo our queueing.
         */
 
-       while (!READ_ONCE(node->locked)) {
+       while (!smp_load_acquire(&node->locked)) {
                /*
                 * If we need to reschedule bail... so we can block.
                 */
@@ -198,7 +198,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
         * Second most likely case.
         */
        node = this_cpu_ptr(&osq_node);
-       next = xchg(&node->next, NULL);
+       next = xchg_release(&node->next, NULL);
        if (next) {
                WRITE_ONCE(next->locked, 1);
                return;
-- 
1.7.1

Reply via email to