On Wed, Mar 22, 2017 at 11:35:56AM +0100, Peter Zijlstra wrote:
> Since there's already two copies of this code, introduce a helper now
> before we get a third instance.
> 
> Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>


An easy one!

Reviewed-by: Darren Hart (VMware) <dvh...@infradead.org>

> ---
>  kernel/futex.c                  |    5 +----
>  kernel/locking/rtmutex.c        |   12 +++++++++---
>  kernel/locking/rtmutex_common.h |    1 +
>  3 files changed, 11 insertions(+), 7 deletions(-)
> 
> --- a/kernel/futex.c
> +++ b/kernel/futex.c
> @@ -2956,10 +2956,7 @@ static int futex_wait_requeue_pi(u32 __u
>        * The waiter is allocated on our stack, manipulated by the requeue
>        * code while we sleep on uaddr.
>        */
> -     debug_rt_mutex_init_waiter(&rt_waiter);
> -     RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
> -     RB_CLEAR_NODE(&rt_waiter.tree_entry);
> -     rt_waiter.task = NULL;
> +     rt_mutex_init_waiter(&rt_waiter);
>  
>       ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
>       if (unlikely(ret != 0))
> --- a/kernel/locking/rtmutex.c
> +++ b/kernel/locking/rtmutex.c
> @@ -1153,6 +1153,14 @@ void rt_mutex_adjust_pi(struct task_stru
>                                  next_lock, NULL, task);
>  }
>  
> +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
> +{
> +     debug_rt_mutex_init_waiter(waiter);
> +     RB_CLEAR_NODE(&waiter->pi_tree_entry);
> +     RB_CLEAR_NODE(&waiter->tree_entry);
> +     waiter->task = NULL;
> +}
> +
>  /**
>   * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
>   * @lock:             the rt_mutex to take
> @@ -1235,9 +1243,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
>       unsigned long flags;
>       int ret = 0;
>  
> -     debug_rt_mutex_init_waiter(&waiter);
> -     RB_CLEAR_NODE(&waiter.pi_tree_entry);
> -     RB_CLEAR_NODE(&waiter.tree_entry);
> +     rt_mutex_init_waiter(&waiter);

Verified that although waiter.task was not previously assigned NULL at this site,
having the new helper do so is harmless: task_blocks_on_rt_mutex initializes
waiter.task before it is ever referenced.

>  
>       /*
>        * Technically we could use raw_spin_[un]lock_irq() here, but this can
> --- a/kernel/locking/rtmutex_common.h
> +++ b/kernel/locking/rtmutex_common.h
> @@ -103,6 +103,7 @@ extern void rt_mutex_init_proxy_locked(s
>                                      struct task_struct *proxy_owner);
>  extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
>                                 struct task_struct *proxy_owner);
> +extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
>  extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
>                                    struct rt_mutex_waiter *waiter,
>                                    struct task_struct *task);
> 
> 
> 

-- 
Darren Hart
VMware Open Source Technology Center

Reply via email to