On 64-bit architectures, each rwsem writer will have its own unique lock word for acquiring the lock. Right now, the writer code recomputes the lock word every time it tries to acquire the lock. That is wasted work. The lock word is now computed once, cached and reused whenever it is needed.
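The idea, as a minimal user-space sketch (the toy_rwsem type, TOY_WRITER_LOCKED value and toy_* helpers are invented for illustration and are not the kernel code): compute the writer lock word once per slowpath invocation and pass it down as a const argument, instead of recomputing it on every acquisition attempt.

/*
 * Minimal user-space sketch of the caching pattern, assuming a toy
 * semaphore type; none of these names exist in the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_WRITER_LOCKED	(1L << 0)	/* stand-in for RWSEM_WRITER_LOCKED */

struct toy_rwsem {
	atomic_long count;
};

/* The lock word arrives precomputed; nothing is recomputed per attempt. */
static bool toy_try_write_lock(struct toy_rwsem *sem, const long wlock)
{
	long old = 0;

	return atomic_compare_exchange_strong(&sem->count, &old, wlock);
}

void toy_down_write_slowpath(struct toy_rwsem *sem)
{
	const long wlock = TOY_WRITER_LOCKED;	/* computed once, reused below */

	while (!toy_try_write_lock(sem, wlock))
		;	/* spinning/sleeping logic elided */
}

The patch below does the same thing by adding a const long wlock parameter to the try-lock helpers and initializing it once in __rwsem_down_write_failed_common().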
On 32-bit architectures, the extra constant argument to rwsem_try_write_lock() and rwsem_try_write_lock_unqueued() should be optimized out by the compiler.

Signed-off-by: Waiman Long <long...@redhat.com>
---
 kernel/locking/rwsem-xadd.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 8d352c375e60..87348b031b85 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -244,8 +244,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
  * race conditions between checking the rwsem wait list and setting the
  * sem->count accordingly.
  */
-static inline bool
-rwsem_try_write_lock(long count, struct rw_semaphore *sem, bool first)
+static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem,
+					const long wlock, bool first)
 {
 	long new;
 
@@ -255,7 +255,7 @@ rwsem_try_write_lock(long count, struct rw_semaphore *sem, bool first)
 	if (!first && RWSEM_COUNT_HANDOFF(count))
 		return false;
 
-	new = (count & ~RWSEM_FLAG_HANDOFF) + RWSEM_WRITER_LOCKED -
+	new = (count & ~RWSEM_FLAG_HANDOFF) + wlock -
 	      (list_is_singular(&sem->wait_list) ? RWSEM_FLAG_WAITERS : 0);
 
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)) {
@@ -294,13 +294,14 @@ static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
 /*
  * Try to acquire write lock before the writer has been put on wait queue.
  */
-static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
+static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem,
+						 const long wlock)
 {
 	long count = atomic_long_read(&sem->count);
 
 	while (!RWSEM_COUNT_LOCKED_OR_HANDOFF(count)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
-					count + RWSEM_WRITER_LOCKED)) {
+					count + wlock)) {
 			rwsem_set_owner(sem);
 			lockevent_inc(rwsem_opt_wlock);
 			return true;
@@ -416,7 +417,7 @@ static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
 	return !owner ? OWNER_NULL : OWNER_READER;
 }
 
-static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
+static bool rwsem_optimistic_spin(struct rw_semaphore *sem, const long wlock)
 {
 	bool taken = false;
 	bool prev_not_writer = false;
@@ -449,7 +450,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
 		/*
 		 * Try to acquire the lock
 		 */
-		taken = wlock ? rwsem_try_write_lock_unqueued(sem)
+		taken = wlock ? rwsem_try_write_lock_unqueued(sem, wlock)
			      : rwsem_try_read_lock_unqueued(sem);
 
 		if (taken)
@@ -531,7 +532,8 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	return false;
 }
 
-static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
+static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem,
+					 const long wlock)
 {
 	return false;
 }
@@ -565,7 +567,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 		 */
		atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
 		adjustment = 0;
-		if (rwsem_optimistic_spin(sem, false)) {
+		if (rwsem_optimistic_spin(sem, 0)) {
 			unsigned long flags;
 
 			/*
@@ -681,10 +683,11 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	struct rwsem_waiter waiter;
 	struct rw_semaphore *ret = sem;
 	DEFINE_WAKE_Q(wake_q);
+	const long wlock = RWSEM_WRITER_LOCKED;
 
 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_can_spin_on_owner(sem) &&
-	    rwsem_optimistic_spin(sem, true))
+	    rwsem_optimistic_spin(sem, wlock))
 		return sem;
 
 	/*
@@ -743,7 +746,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
 	while (true) {
-		if (rwsem_try_write_lock(count, sem, first))
+		if (rwsem_try_write_lock(count, sem, wlock, first))
 			break;
 
 		raw_spin_unlock_irq(&sem->wait_lock);
-- 
2.18.1
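A note on the 32-bit remark in the changelog, as a generic sketch (the toy_try_lock()/toy_caller() names are invented, and this describes expected compiler behaviour rather than a guarantee): when a static inline helper only ever receives a compile-time constant for the extra argument, inlining plus constant propagation should make the argument disappear, so 32-bit builds are not expected to pay for it.

#include <stdbool.h>

#define WRITER_LOCKED	(1L << 0)

/* Extra const argument, analogous to the wlock parameter in the patch. */
static inline bool toy_try_lock(long *count, const long wlock)
{
	if (*count == 0) {
		*count = wlock;	/* after inlining, wlock is just the constant */
		return true;
	}
	return false;
}

bool toy_caller(long *count)
{
	/* The only call site passes a constant, so the argument can fold away. */
	return toy_try_lock(count, WRITER_LOCKED);
}

With optimization enabled, toy_caller() should compile to the same code as a version that open-codes WRITER_LOCKED inside the helper.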