Before combining owner and count, we are adding two new helpers for
accessing the owner value in the rwsem.

 1) struct task_struct *rwsem_get_owner(struct rw_semaphore *sem)
 2) bool is_rwsem_reader_owned(struct rw_semaphore *sem)

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/rwsem-xadd.c | 15 ++++++++++-----
 kernel/locking/rwsem.c      |  3 +--
 kernel/locking/rwsem.h      | 32 ++++++++++++++++++++++++++------
 3 files changed, 37 insertions(+), 13 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 71253d63c206..79d52624a00d 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -327,7 +327,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 
        preempt_disable();
        rcu_read_lock();
-       owner = READ_ONCE(sem->owner);
+       owner = rwsem_get_owner(sem);
        if (owner) {
                ret = is_rwsem_owner_spinnable(owner) &&
                     (is_rwsem_owner_reader(owner) || owner_on_cpu(owner));
@@ -359,15 +359,21 @@ enum owner_state {
 
 static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
 {
-       struct task_struct *owner = READ_ONCE(sem->owner);
+       struct task_struct *owner = rwsem_get_owner(sem);
        long count;
 
        if (!is_rwsem_owner_spinnable(owner))
                return OWNER_NONSPINNABLE;
 
        rcu_read_lock();
-       while (owner && !is_rwsem_owner_reader(owner)
-                    && (READ_ONCE(sem->owner) == owner)) {
+       while (owner && !is_rwsem_owner_reader(owner)) {
+               struct task_struct *new_owner = rwsem_get_owner(sem);
+
+               if (new_owner != owner) {
+                       owner = new_owner;
+                       break;  /* The owner has changed */
+               }
+
                /*
                 * Ensure we emit the owner->on_cpu, dereference _after_
                 * checking sem->owner still matches owner, if that fails,
@@ -394,7 +400,6 @@ static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
         * spinning except when here is no active locks and the handoff bit
         * is set. In this case, we have to stop spinning.
         */
-       owner = READ_ONCE(sem->owner);
        if (!is_rwsem_owner_spinnable(owner))
                return OWNER_NONSPINNABLE;
        if (owner && !is_rwsem_owner_reader(owner))
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index ccbf18f560ff..38d24676e01c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -198,8 +198,7 @@ EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
 {
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
-                               sem);
+       DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        __up_read(sem);
 }
 
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index c711f4323a52..789e8d270260 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -90,6 +90,11 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
        WRITE_ONCE(sem->owner, NULL);
 }
 
+static inline struct task_struct *rwsem_get_owner(struct rw_semaphore *sem)
+{
+       return READ_ONCE(sem->owner);
+}
+
 /*
  * The task_struct pointer of the last owning reader will be left in
  * the owner field.
@@ -134,6 +139,23 @@ static inline bool is_rwsem_spinnable(struct rw_semaphore *sem)
        return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
 }
 
+/*
+ * Return true if the rwsem is owned by a reader.
+ */
+static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
+{
+#ifdef CONFIG_DEBUG_RWSEMS
+       /*
+        * Check the count to see if it is write-locked.
+        */
+       long count = atomic_long_read(&sem->count);
+
+       if (count & RWSEM_WRITER_MASK)
+               return false;
+#endif
+       return (unsigned long)sem->owner & RWSEM_READER_OWNED;
+}
+
 /*
  * Return true if rwsem is owned by an anonymous writer or readers.
  */
@@ -154,6 +176,7 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
        unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
                                                   | RWSEM_ANONYMOUSLY_OWNED;
+
        if (READ_ONCE(sem->owner) == (struct task_struct *)val)
                cmpxchg_relaxed((unsigned long *)&sem->owner, val,
                                RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
@@ -216,8 +239,7 @@ static inline void __down_read(struct rw_semaphore *sem)
        if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
                        &sem->count) & RWSEM_READ_FAILED_MASK)) {
                rwsem_down_read_failed(sem);
-               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
-                                       RWSEM_READER_OWNED), sem);
+               DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        } else {
                rwsem_set_reader_owned(sem);
        }
@@ -229,8 +251,7 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
                        &sem->count) & RWSEM_READ_FAILED_MASK)) {
                if (IS_ERR(rwsem_down_read_failed_killable(sem)))
                        return -EINTR;
-               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
-                                       RWSEM_READER_OWNED), sem);
+               DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        } else {
                rwsem_set_reader_owned(sem);
        }
@@ -297,8 +318,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        long tmp;
 
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
-                               sem);
+       DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
        if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS))
-- 
2.18.1

Reply via email to