Move the rwsem_down_read_failed() function down below the optimistic
spinning section in preparation for enabling optimistic spinning for
the readers. There is no change to the code itself.

Signed-off-by: Waiman Long <waiman.l...@hp.com>
---
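Note: the reordering matters because, once reader optimistic spinning is
enabled, the reader slowpath is expected to call rwsem_optimistic_spin(),
which is defined in the optimistic spinning section; keeping the caller
below the callee avoids having to add a forward declaration. A minimal
user-space sketch of that ordering constraint follows (the names are
illustrative stand-ins, not the actual rwsem functions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for rwsem_optimistic_spin(): defined first so that the
	 * slowpath below can call it without a forward declaration.
	 */
	static bool optimistic_spin(void)
	{
		return false;		/* pretend spinning did not get the lock */
	}

	/* Stand-in for rwsem_down_read_failed(): placed after the spin
	 * helper, mirroring the ordering this patch sets up.
	 */
	static void down_read_slowpath(void)
	{
		if (optimistic_spin())
			return;		/* lock acquired while spinning */
		puts("queueing as a waiter");
	}

	int main(void)
	{
		down_read_slowpath();
		return 0;
	}
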
 kernel/locking/rwsem-xadd.c |   96 +++++++++++++++++++++---------------------
 1 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 1281de8..aa03479 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -239,54 +239,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
        return sem;
 }
 
-/*
- * Wait for the read lock to be granted
- */
-__visible
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
-       struct rwsem_waiter waiter;
-       struct task_struct *tsk = current;
-
-       /* set up my own style of waitqueue */
-       waiter.task = tsk;
-       waiter.type = RWSEM_WAITING_FOR_READ;
-       get_task_struct(tsk);
-
-       raw_spin_lock_irq(&sem->wait_lock);
-       if (list_empty(&sem->wait_list))
-               adjustment += RWSEM_WAITING_BIAS;
-       list_add_tail(&waiter.list, &sem->wait_list);
-
-       /* we're now waiting on the lock, but no longer actively locking */
-       count = rwsem_atomic_update(adjustment, sem);
-
-       /* If there are no active locks, wake the front queued process(es).
-        *
-        * If there are no writers and we are first in the queue,
-        * wake our own waiter to join the existing active readers !
-        */
-       if (count == RWSEM_WAITING_BIAS ||
-           (count > RWSEM_WAITING_BIAS &&
-            adjustment != -RWSEM_ACTIVE_READ_BIAS))
-               sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
-
-       raw_spin_unlock_irq(&sem->wait_lock);
-
-       /* wait to be given the lock */
-       while (true) {
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-               if (!waiter.task)
-                       break;
-               schedule();
-       }
-
-       tsk->state = TASK_RUNNING;
-
-       return sem;
-}
-
 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 {
        if (!(count & RWSEM_ACTIVE_MASK)) {
@@ -465,6 +417,54 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 #endif
 
 /*
+ * Wait for the read lock to be granted
+ */
+__visible
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+       struct rwsem_waiter waiter;
+       struct task_struct *tsk = current;
+
+       /* set up my own style of waitqueue */
+       waiter.task = tsk;
+       waiter.type = RWSEM_WAITING_FOR_READ;
+       get_task_struct(tsk);
+
+       raw_spin_lock_irq(&sem->wait_lock);
+       if (list_empty(&sem->wait_list))
+               adjustment += RWSEM_WAITING_BIAS;
+       list_add_tail(&waiter.list, &sem->wait_list);
+
+       /* we're now waiting on the lock, but no longer actively locking */
+       count = rwsem_atomic_update(adjustment, sem);
+
+       /* If there are no active locks, wake the front queued process(es).
+        *
+        * If there are no writers and we are first in the queue,
+        * wake our own waiter to join the existing active readers !
+        */
+       if (count == RWSEM_WAITING_BIAS ||
+           (count > RWSEM_WAITING_BIAS &&
+            adjustment != -RWSEM_ACTIVE_READ_BIAS))
+               sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+
+       raw_spin_unlock_irq(&sem->wait_lock);
+
+       /* wait to be given the lock */
+       while (true) {
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+               if (!waiter.task)
+                       break;
+               schedule();
+       }
+
+       tsk->state = TASK_RUNNING;
+
+       return sem;
+}
+
+/*
  * Wait until we successfully acquire the write lock
  */
 __visible
-- 
1.7.1
