Convert the rwsem count variable to an atomic_long_t since we use it
as an atomic variable. This also allows us to remove the
rwsem_atomic_{add,update} "abstraction", which would now be an unnecessary
level of indirection. Follow-up patches remove the
rwsem_atomic_{add,update} definitions across the various architectures.

Suggested-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Jason Low <jason.l...@hpe.com>
---
 include/linux/rwsem.h       |  6 +++---
 kernel/locking/rwsem-xadd.c | 31 ++++++++++++++++---------------
 2 files changed, 19 insertions(+), 18 deletions(-)
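
For reference (not part of the applied diff): the per-arch helpers being
replaced map onto the generic atomic_long_t API roughly as shown below.
Exact arch implementations vary, and "delta", "o", "n" are placeholder
names, so treat this only as an illustrative sketch of the conversion
done in this patch:

	/* old form                            generic atomic_long_t replacement              */
	rwsem_atomic_add(delta, sem);       /* atomic_long_add(delta, &sem->count)            */
	rwsem_atomic_update(delta, sem);    /* atomic_long_add_return(delta, &sem->count)     */
	cmpxchg_acquire(&sem->count, o, n); /* atomic_long_cmpxchg_acquire(&sem->count, o, n) */
	READ_ONCE(sem->count);              /* atomic_long_read(&sem->count)                  */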

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d1c12d1..e3d5a00 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -26,7 +26,7 @@ struct rw_semaphore;
 #else
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
-       long count;
+       atomic_long_t count;
        struct list_head wait_list;
        raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -54,7 +54,7 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-       return sem->count != 0;
+       return atomic_long_read(&sem->count) != 0;
 }
 
 #endif
@@ -74,7 +74,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #endif
 
 #define __RWSEM_INITIALIZER(name)                              \
-       { .count = RWSEM_UNLOCKED_VALUE,                        \
+       { .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE),      \
          .wait_list = LIST_HEAD_INIT((name).wait_list),        \
          .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
          __RWSEM_OPT_INIT(name)                                \
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 296d421..d5ecec3 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -80,7 +80,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->count = RWSEM_UNLOCKED_VALUE;
+       atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -146,10 +146,11 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
  try_reader_grant:
-               oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+               oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;
+
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
-                       if (rwsem_atomic_update(-adjustment, sem) &
+                       if (atomic_long_sub_return(adjustment, &sem->count) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
@@ -179,7 +180,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
                adjustment -= RWSEM_WAITING_BIAS;
 
        if (adjustment)
-               rwsem_atomic_add(adjustment, sem);
+               atomic_long_add(adjustment, &sem->count);
 
        next = sem->wait_list.next;
        loop = woken;
@@ -228,7 +229,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
        list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we're now waiting on the lock, but no longer actively locking */
-       count = rwsem_atomic_update(adjustment, sem);
+       count = atomic_long_add_return(adjustment, &sem->count);
 
        /* If there are no active locks, wake the front queued process(es).
         *
@@ -276,7 +277,8 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
 
-       if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) {
+       if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
+                                                       == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }
@@ -290,13 +292,13 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-       long old, count = READ_ONCE(sem->count);
+       long old, count = atomic_long_read(&sem->count);
 
        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;
 
-               old = cmpxchg_acquire(&sem->count, count,
+               old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
@@ -318,7 +320,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (!owner) {
-               long count = READ_ONCE(sem->count);
+               long count = atomic_long_read(&sem->count);
                /*
                 * If sem->owner is not set, yet we have just recently entered the
                 * slowpath with the lock being active, then there is a possibility
@@ -369,7 +371,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
         * held by readers. Check the counter to verify the
         * state.
         */
-       count = READ_ONCE(sem->count);
+       count = atomic_long_read(&sem->count);
        return (count == 0 || count == RWSEM_WAITING_BIAS);
 }
 
@@ -453,7 +455,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
        struct rw_semaphore *ret = sem;
 
        /* undo write bias from down_write operation, stop active locking */
-       count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
+       count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
 
        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
@@ -476,7 +478,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
-               count = READ_ONCE(sem->count);
+               count = atomic_long_read(&sem->count);
 
                /*
                 * If there were already threads queued before us and there are
@@ -485,9 +487,8 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                 */
                if (count > RWSEM_WAITING_BIAS)
                        sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
-
        } else
-               count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+               count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);
 
        /* wait until we successfully acquire the lock */
        set_current_state(state);
@@ -503,7 +504,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
                        schedule();
                        set_current_state(state);
-               } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+               } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
 
                raw_spin_lock_irq(&sem->wait_lock);
        }
-- 
2.1.4
