On 05/18/2018 12:55 PM, Oleg Nesterov wrote:
> Add the trivial owner_on_cpu() helper for rwsem_can_spin_on_owner() and
> rwsem_spin_on_owner(); it also makes rwsem_can_spin_on_owner() a bit
> clearer.
>
> Signed-off-by: Oleg Nesterov <[email protected]>
> ---
>  kernel/locking/rwsem-xadd.c | 25 +++++++++++++------------
>  1 file changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
> index a903367..3064c50 100644
> --- a/kernel/locking/rwsem-xadd.c
> +++ b/kernel/locking/rwsem-xadd.c
> @@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
>       }
>  }
>  
> +static inline bool owner_on_cpu(struct task_struct *owner)
> +{
> +     /*
> +      * As lock holder preemption issue, we both skip spinning if
> +      * task is not on cpu or its cpu is preempted
> +      */
> +     return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
> +}
> +
>  static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
>  {
>       struct task_struct *owner;
> @@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
>  
>       rcu_read_lock();
>       owner = READ_ONCE(sem->owner);
> -     if (!owner || !is_rwsem_owner_spinnable(owner)) {
> -             ret = !owner;   /* !owner is spinnable */
> -             goto done;
> +     if (owner) {
> +             ret = is_rwsem_owner_spinnable(owner) &&
> +                   owner_on_cpu(owner);
>       }
> -
> -     /*
> -      * As lock holder preemption issue, we both skip spinning if task is not
> -      * on cpu or its cpu is preempted
> -      */
> -     ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
> -done:
>       rcu_read_unlock();
>       return ret;
>  }
> @@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
>                * abort spinning when need_resched or owner is not running or
>                * owner's cpu is preempted.
>                */
> -             if (!owner->on_cpu || need_resched() ||
> -                             vcpu_is_preempted(task_cpu(owner))) {
> +             if (need_resched() || !owner_on_cpu(owner)) {
>                       rcu_read_unlock();
>                       return false;
>               }

Acked-by: Waiman Long <[email protected]>
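
For anyone skimming the thread, the net effect is that both optimistic-spinning
paths now key off the same owner_on_cpu() predicate. Below is a minimal
user-space sketch of that shape; the task struct, the vcpu_is_preempted() stub
and the is_spinnable() callback are simplified stand-ins for illustration, not
the kernel definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct task_struct: only the fields we need. */
    struct task {
            int on_cpu;     /* non-zero while the task is running on a CPU */
            int cpu;        /* the CPU it last ran on */
    };

    /* Stub: on bare metal this is always false; under a hypervisor it would
     * report whether the owner's vCPU has lost its physical CPU. */
    static bool vcpu_is_preempted(int cpu)
    {
            (void)cpu;
            return false;
    }

    /* The helper the patch introduces: spinning only makes sense while the
     * owner is actually running and its CPU has not been preempted. */
    static bool owner_on_cpu(struct task *owner)
    {
            return owner->on_cpu && !vcpu_is_preempted(owner->cpu);
    }

    /* Shape of rwsem_can_spin_on_owner() after the patch: with no owner the
     * sem stays spinnable; otherwise spin only if the owner is spinnable and
     * on a CPU.  is_spinnable() stands in for is_rwsem_owner_spinnable(). */
    static bool can_spin_on_owner(struct task *owner,
                                  bool (*is_spinnable)(struct task *))
    {
            bool ret = true;

            if (owner)
                    ret = is_spinnable(owner) && owner_on_cpu(owner);
            return ret;
    }

    static bool always_spinnable(struct task *owner)
    {
            (void)owner;
            return true;
    }

    int main(void)
    {
            struct task t = { .on_cpu = 1, .cpu = 0 };

            printf("owner running:  can spin = %d\n",
                   can_spin_on_owner(&t, always_spinnable));
            t.on_cpu = 0;
            printf("owner sleeping: can spin = %d\n",
                   can_spin_on_owner(&t, always_spinnable));
            return 0;
    }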
