This patch modifies rwsem_spin_on_owner() to return four possible
values to better reflect the state of the lock holder, which enables
the caller to make a better decision about what to do next.
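
Since each of the four states is a distinct bit, callers can test a
whole group of states with a single mask.  The intended spin-loop
pattern then looks roughly like the sketch below (illustration only,
not the actual caller change, which is in the last hunk; the
lock-acquisition attempt itself is elided):

	while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
		/*
		 * Owner is currently NULL or a running writer, so it
		 * is still worth trying to acquire the lock here.
		 */
	}
	/* OWNER_READER or OWNER_NONSPINNABLE: stop optimistic spinning */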

Signed-off-by: Waiman Long <long...@redhat.com>
---
 kernel/locking/rwsem.c | 65 ++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 18 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 15480aa6d442..9ec25ef4e7b7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -414,17 +414,54 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 }
 
 /*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * The rwsem_spin_on_owner() function returns the following 4 values
+ * depending on the lock owner state.
+ *   OWNER_NULL  : owner is currently NULL
+ *   OWNER_WRITER: when owner changes and is a writer
+ *   OWNER_READER: when owner changes and the new owner may be a reader.
+ *   OWNER_NONSPINNABLE:
+ *                when optimistic spinning has to stop because either the
+ *                owner stops running, is unknown, or its timeslice has
+ *                been used up.
  */
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+       OWNER_NULL              = 1 << 0,
+       OWNER_WRITER            = 1 << 1,
+       OWNER_READER            = 1 << 2,
+       OWNER_NONSPINNABLE      = 1 << 3,
+};
+#define OWNER_SPINNABLE                (OWNER_NULL | OWNER_WRITER)
+
+static inline enum owner_state rwsem_owner_state(unsigned long owner)
 {
-       struct task_struct *owner = READ_ONCE(sem->owner);
+       if (!owner)
+               return OWNER_NULL;
 
-       if (!is_rwsem_owner_spinnable(owner))
-               return false;
+       if (owner & RWSEM_ANONYMOUSLY_OWNED)
+               return OWNER_NONSPINNABLE;
+
+       if (owner & RWSEM_READER_OWNED)
+               return OWNER_READER;
+
+       return OWNER_WRITER;
+}
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
+{
+       struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
+       enum owner_state state = rwsem_owner_state((unsigned long)owner);
+
+       if (state != OWNER_WRITER)
+               return state;
 
        rcu_read_lock();
-       while (owner && (READ_ONCE(sem->owner) == owner)) {
+       for (;;) {
+               tmp = READ_ONCE(sem->owner);
+               if (tmp != owner) {
+                       state = rwsem_owner_state((unsigned long)tmp);
+                       break;
+               }
+
                /*
                 * Ensure we emit the owner->on_cpu, dereference _after_
                 * checking sem->owner still matches owner, if that fails,
@@ -433,24 +470,16 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
                 */
                barrier();
 
-               /*
-                * abort spinning when need_resched or owner is not running or
-                * owner's cpu is preempted.
-                */
                if (need_resched() || !owner_on_cpu(owner)) {
-                       rcu_read_unlock();
-                       return false;
+                       state = OWNER_NONSPINNABLE;
+                       break;
                }
 
                cpu_relax();
        }
        rcu_read_unlock();
 
-       /*
-        * If there is a new owner or the owner is not set, we continue
-        * spinning.
-        */
-       return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+       return state;
 }
 
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -473,7 +502,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
         *  2) readers own the lock as we can't determine if they are
         *     actively running or not.
         */
-       while (rwsem_spin_on_owner(sem)) {
+       while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
                /*
                 * Try to acquire the lock
                 */
-- 
2.18.1
