On Sat, Apr 13, 2019 at 01:22:47PM -0400, Waiman Long wrote:
> +#define RWSEM_COUNT_LOCKED(c)	((c) & RWSEM_LOCK_MASK)
The above doesn't seem to make it more readable or shorter.

--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -192,7 +192,7 @@ static inline bool rwsem_try_write_lock(
 {
 	long new;
 
-	if (RWSEM_COUNT_LOCKED(count))
+	if (count & RWSEM_LOCK_MASK)
 		return false;
 
 	new = count + RWSEM_WRITER_LOCKED -
@@ -214,7 +214,7 @@ static inline bool rwsem_try_write_lock_
 {
 	long count = atomic_long_read(&sem->count);
 
-	while (!RWSEM_COUNT_LOCKED(count)) {
+	while (!(count & RWSEM_LOCK_MASK)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
 					count + RWSEM_WRITER_LOCKED)) {
 			rwsem_set_owner(sem);
@@ -393,7 +393,7 @@ __rwsem_down_read_failed_common(struct r
 	 * If there are no writers and we are first in the queue,
 	 * wake our own waiter to join the existing active readers !
 	 */
-	if (!RWSEM_COUNT_LOCKED(count) ||
+	if (!(count & RWSEM_LOCK_MASK) ||
 	   (!(count & RWSEM_WRITER_MASK) &&
 	    (adjustment & RWSEM_FLAG_WAITERS)))
 		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
@@ -522,7 +522,7 @@ __rwsem_down_write_failed_common(struct 
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 		count = atomic_long_read(&sem->count);
-	} while (RWSEM_COUNT_LOCKED(count));
+	} while (count & RWSEM_LOCK_MASK);
 
 	raw_spin_lock_irq(&sem->wait_lock);
 }
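
FWIW, a tiny standalone sketch of the comparison; the bit layout and
values below are made up purely for illustration and are not the
kernel's actual rwsem count layout:

#include <stdio.h>

/* Illustrative layout only -- not the real rwsem count encoding. */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_READER_BIAS	(1UL << 8)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_LOCKED | RWSEM_READER_MASK)

/* The wrapper being questioned above ... */
#define RWSEM_COUNT_LOCKED(c)	((c) & RWSEM_LOCK_MASK)

int main(void)
{
	/* one reader holds the lock, waiters are queued */
	unsigned long count = RWSEM_READER_BIAS | RWSEM_FLAG_WAITERS;

	/* ... versus simply open-coding the mask test: */
	printf("macro:      %d\n", !!RWSEM_COUNT_LOCKED(count));
	printf("open-coded: %d\n", !!(count & RWSEM_LOCK_MASK));

	return 0;
}

Both forms are the same length and the open-coded one says directly
which bits are being tested.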