On Fri, 23 Jun 2017 09:37:14 +0200
Mike Galbraith <efa...@gmx.de> wrote:

> V2 changes:
>   - beautification (ymmv)
>   - enable lock stealing when waiter is queued
> 
> rtmutex: Fix lock stealing logic
> 
> 1. When trying to acquire an rtmutex, we first try to grab it without
> queueing the waiter, and explicitly check for that initial attempt
> in the !waiter path of __try_to_take_rt_mutex().  Checking whether
> the lock taker is top waiter before allowing a steal attempt in that
> path is a thinko: the lock taker has not yet blocked.
> 
> 2. It seems wrong to change the definition of rt_mutex_waiter_less()
> to mean less or perhaps equal when we have an rt_mutex_waiter_equal().
> 
> Remove the thinko, restore rt_mutex_waiter_less(), implement and use
> rt_mutex_steal() based upon rt_mutex_waiter_less/equal(), moving all
> qualification criteria into the function itself.
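
To make the comparison semantics concrete (in the kernel's view a
numerically smaller ->prio is the higher priority), here is a toy model
of the decision the new rt_mutex_steal() makes. This is just my sketch
for discussion, with made-up names, and with the dl_prio()/deadline
handling of rt_mutex_waiter_less/equal() omitted:

	/* Toy model, not kernel code: lower ->prio == higher priority. */
	struct w { int prio; int is_rt_task; };

	static int can_steal(struct w *waiter, struct w *top, int lateral)
	{
		if (waiter->prio < top->prio)	/* strictly higher priority */
			return 1;
		if (!lateral || waiter->is_rt_task)
			return 0;	/* RT tasks never steal laterally */
		return waiter->prio == top->prio; /* lateral steal among peers */
	}
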
> 
> Signed-off-by: Mike Galbraith <efa...@gmx.de>
> ---
>  kernel/locking/rtmutex.c |   76 ++++++++++++++++++++++-------------------------
>  1 file changed, 37 insertions(+), 39 deletions(-)
> 
> --- a/kernel/locking/rtmutex.c
> +++ b/kernel/locking/rtmutex.c
> @@ -236,26 +236,19 @@ static inline bool unlock_rt_mutex_safe(
>  }
>  #endif
>  
> -#define STEAL_NORMAL  0
> -#define STEAL_LATERAL 1
> -
>  /*
>   * Only use with rt_mutex_waiter_{less,equal}()
>   */
> -#define task_to_waiter(p)    \
> -     &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
> +#define task_to_waiter(p) &(struct rt_mutex_waiter) \
> +     { .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
>  
>  static inline int
>  rt_mutex_waiter_less(struct rt_mutex_waiter *left,
> -                  struct rt_mutex_waiter *right, int mode)
> +                  struct rt_mutex_waiter *right)
>  {
> -     if (mode == STEAL_NORMAL) {
> -             if (left->prio < right->prio)
> -                     return 1;
> -     } else {
> -             if (left->prio <= right->prio)
> -                     return 1;
> -     }
> +     if (left->prio < right->prio)
> +             return 1;
> +
>       /*
>        * If both waiters have dl_prio(), we check the deadlines of the
>        * associated tasks.
> @@ -287,6 +280,27 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
>       return 1;
>  }
>  
> +#define STEAL_NORMAL  0
> +#define STEAL_LATERAL 1
> +
> +static inline int
> +rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
> +{
> +     struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
> +
> +     if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
> +             return 1;
> +
> +     /*
> +      * Note that RT tasks are excluded from lateral-steals
> +      * to prevent the introduction of an unbounded latency.
> +      */
> +     if (mode == STEAL_NORMAL || rt_task(waiter->task))
> +             return 0;
> +
> +     return rt_mutex_waiter_equal(waiter, top_waiter);
> +}
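
FWIW, running the toy model from above through the obvious cases matches
what this function does. Kernel prio numbers: FIFO-50 maps to 49,
FIFO-49 to 50, SCHED_OTHER at nice 0 to 120; the harness itself is
hypothetical:

	#include <assert.h>

	int main(void)
	{
		struct w fifo50  = { .prio = 49,  .is_rt_task = 1 };
		struct w fifo49  = { .prio = 50,  .is_rt_task = 1 };
		struct w other_a = { .prio = 120, .is_rt_task = 0 };
		struct w other_b = { .prio = 120, .is_rt_task = 0 };

		assert(can_steal(&fifo50, &fifo49, 0));    /* higher prio: any mode */
		assert(!can_steal(&other_a, &other_b, 0)); /* equal prio, STEAL_NORMAL: no */
		assert(can_steal(&other_a, &other_b, 1));  /* equal prio, STEAL_LATERAL: yes */
		return 0;
	}
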
> +
>  static void
>  rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
>  {
> @@ -298,7 +312,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
>       while (*link) {
>               parent = *link;
>               entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
> -             if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
> +             if (rt_mutex_waiter_less(waiter, entry)) {
>                       link = &parent->rb_left;
>               } else {
>                       link = &parent->rb_right;
> @@ -337,7 +351,7 @@ rt_mutex_enqueue_pi(struct task_struct *
>       while (*link) {
>               parent = *link;
>               entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
> -             if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
> +             if (rt_mutex_waiter_less(waiter, entry)) {
>                       link = &parent->rb_left;
>               } else {
>                       link = &parent->rb_right;
> @@ -847,6 +861,7 @@ static int rt_mutex_adjust_prio_chain(st
>   * @task:   The task which wants to acquire the lock
>   * @waiter: The waiter that is queued to the lock's wait tree if the
>   *       callsite called task_blocked_on_lock(), otherwise NULL
> + * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
>   */
>  static int __try_to_take_rt_mutex(struct rt_mutex *lock,
>                                 struct task_struct *task,
> @@ -886,20 +901,16 @@ static int __try_to_take_rt_mutex(struct
>        */
>       if (waiter) {
>               /*
> -              * If waiter is not the highest priority waiter of
> -              * @lock, give up.
> +              * If waiter is not the highest priority waiter of @lock,
> +              * or its peer when lateral steal is allowed, give up.
>                */
> -             if (waiter != rt_mutex_top_waiter(lock)) {
> -                     /* XXX rt_mutex_waiter_less() ? */
> +             if (!rt_mutex_steal(lock, waiter, mode))
>                       return 0;
> -             }
> -
>               /*
>                * We can acquire the lock. Remove the waiter from the
>                * lock waiters tree.
>                */
>               rt_mutex_dequeue(lock, waiter);
> -

I liked that space.

>       } else {
>               /*
>                * If the lock has waiters already we check whether @task is
> @@ -910,25 +921,12 @@ static int __try_to_take_rt_mutex(struct
>                * not need to be dequeued.
>                */
>               if (rt_mutex_has_waiters(lock)) {
> -                     struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
> -
> -                     if (task != pown)
> -                             return 0;

OK, I see what happened here. Yeah, when waiter == NULL the task has
not been queued on the lock's wait tree yet, so pown != task is always
true, and that check made the steal attempt give up unconditionally.
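
Concretely, the two ways into __try_to_take_rt_mutex(), a simplified
sketch of the call sites as I read them, not verbatim:

	/* First attempt / trylock: @task itself is not queued, so it can
	 * never be the top waiter; with other waiters present, the old
	 * "task != pown" test always failed the steal in this path, which
	 * is the thinko being fixed. */
	ret = __try_to_take_rt_mutex(lock, task, NULL, mode);

	/* Retry after task_blocked_on_lock() queued @waiter; only on this
	 * path can @waiter already be rt_mutex_top_waiter(lock). */
	ret = __try_to_take_rt_mutex(lock, task, waiter, mode);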


> -
> -                     /*
> -                      * Note that RT tasks are excluded from lateral-steals
> -                      * to prevent the introduction of an unbounded latency.
> -                      */
> -                     if (rt_task(task))
> -                             mode = STEAL_NORMAL;
>                       /*
> -                      * If @task->prio is greater than or equal to
> -                      * the top waiter priority (kernel view),
> -                      * @task lost.
> +                      * If @task->prio is greater than the top waiter
> +                      * priority (kernel view), or equal to it when a
> +                      * lateral steal is forbidden, @task lost.
>                        */
> -                     if (!rt_mutex_waiter_less(task_to_waiter(task),
> -                                               rt_mutex_top_waiter(lock),
> -                                               mode))
> +                     if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
>                               return 0;
>                       /*
>                        * The current top waiter stays enqueued. We

Reviewed-by: Steven Rostedt (VMware) <rost...@goodmis.org>

-- Steve
