3.2.68-rt99-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Thomas Gleixner <t...@linutronix.de>

upstream commit: a57594a13a446d1a6ab1dcd48339f799ce586843

Add a separate local variable for the boost/deboost logic to make the
code more readable. Add comments where appropriate.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Reviewed-by: Steven Rostedt <rost...@goodmis.org>
Signed-off-by: Steven Rostedt <rost...@goodmis.org>

Conflicts:
        kernel/rtmutex.c
---
 kernel/rtmutex.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 9 deletions(-)

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 7d338c8b23b9..34c63cb7ff22 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -289,9 +289,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
 {
-       struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
+       struct rt_mutex_waiter *prerequeue_top_waiter;
        int detect_deadlock, ret = 0, depth = 0;
+       struct rt_mutex *lock;
        unsigned long flags;
 
        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
@@ -398,9 +399,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                goto out_unlock_pi;
        }
 
-       top_waiter = rt_mutex_top_waiter(lock);
+       /*
+        * Store the current top waiter before doing the requeue
+        * operation on @lock. We need it for the boost/deboost
+        * decision below.
+        */
+       prerequeue_top_waiter = rt_mutex_top_waiter(lock);
 
-       /* Requeue the waiter */
+       /* Requeue the waiter in the lock waiter list. */
        rt_mutex_dequeue(lock, waiter);
        waiter->list_entry.prio = task->prio;
        rt_mutex_enqueue(lock, waiter);
@@ -409,6 +415,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);
 
+       /*
+        * We must abort the chain walk if there is no lock owner even
+        * in the dead lock detection case, as we have nothing to
+        * follow here. This is the end of the chain we are walking.
+        */
        if (!rt_mutex_owner(lock)) {
                struct rt_mutex_waiter *lock_top_waiter;
 
@@ -417,29 +428,48 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * to wake the new top waiter up to try to get the lock.
                 */
                lock_top_waiter = rt_mutex_top_waiter(lock);
-               if (top_waiter != lock_top_waiter)
+               if (prerequeue_top_waiter != lock_top_waiter)
                        rt_mutex_wake_waiter(lock_top_waiter);
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }
 
-       /* Grab the next task */
+       /* Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 
        if (waiter == rt_mutex_top_waiter(lock)) {
-               /* Boost the owner */
-               rt_mutex_dequeue_pi(task, top_waiter);
+               /*
+                * The waiter became the new top (highest priority)
+                * waiter on the lock. Replace the previous top waiter
+                * in the owner tasks pi waiters list with this waiter
+                * and adjust the priority of the owner.
+                */
+               rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
 
-       } else if (top_waiter == waiter) {
-               /* Deboost the owner */
+       } else if (prerequeue_top_waiter == waiter) {
+               /*
+                * The waiter was the top waiter on the lock, but is
+                * no longer the top priority waiter. Replace waiter in
+                * the owner tasks pi waiters list with the new top
+                * (highest priority) waiter and adjust the priority
+                * of the owner.
+                * The new top waiter is stored in @waiter so that
+                * @waiter == @top_waiter evaluates to true below and
+                * we continue to deboost the rest of the chain.
+                */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
+       } else {
+               /*
+                * Nothing changed. No need to do any priority
+                * adjustment.
+                */
        }
 
        /*
@@ -452,6 +482,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
+       /*
+        * Store the top waiter of @lock for the end of chain walk
+        * decision below.
+        */
        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);
 
@@ -462,6 +496,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (!next_lock)
                goto out_put_task;
 
+       /*
+        * If the current waiter is not the top waiter on the lock,
+        * then we can stop the chain walk here if we are not in full
+        * deadlock detection mode.
+        */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;
 
-- 
2.1.4
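
Side note for reviewers who want to poke at the boost/deboost decision
outside the kernel tree: below is a minimal, stand-alone C sketch of the
same three-way branch. All names in it (struct waiter, struct task,
top_waiter(), adjust_owner_prio()) are simplified stand-ins invented for
illustration, not the rt_mutex API; it only shows why snapshotting the
pre-requeue top waiter in its own variable makes the three cases easy to
tell apart.

/* Stand-alone model of the boost/deboost decision shape after this
 * patch. All types and helpers are placeholders, not kernel code. */
#include <stdio.h>

struct waiter { int prio; };                  /* stand-in for rt_mutex_waiter */
struct task   { int normal_prio; int prio; }; /* stand-in for task_struct     */

/* Stand-in helper: highest-priority (lowest number) waiter on the lock. */
static struct waiter *top_waiter(struct waiter **list, int n)
{
	struct waiter *top = list[0];
	for (int i = 1; i < n; i++)
		if (list[i]->prio < top->prio)
			top = list[i];
	return top;
}

/* Model of __rt_mutex_adjust_prio(): the owner runs at the better of
 * its own priority and that of its top pi waiter. */
static void adjust_owner_prio(struct task *owner, struct waiter *top_pi_waiter)
{
	owner->prio = (top_pi_waiter && top_pi_waiter->prio < owner->normal_prio)
			? top_pi_waiter->prio : owner->normal_prio;
}

int main(void)
{
	struct waiter a = { .prio = 10 }, b = { .prio = 20 };
	struct waiter *waiters[] = { &a, &b };
	struct task owner = { .normal_prio = 30, .prio = 30 };

	/* Snapshot the top waiter *before* the requeue, as the patch does. */
	struct waiter *prerequeue_top_waiter = top_waiter(waiters, 2);

	/* The walked waiter's priority changed; requeue it. */
	struct waiter *waiter = &b;
	waiter->prio = 5;

	/* The three-way boost/deboost decision documented by the patch. */
	if (waiter == top_waiter(waiters, 2)) {
		/* waiter became the new top waiter: boost the owner */
		adjust_owner_prio(&owner, waiter);
	} else if (prerequeue_top_waiter == waiter) {
		/* waiter was the top waiter but no longer is: deboost
		 * the owner against the new top waiter */
		adjust_owner_prio(&owner, top_waiter(waiters, 2));
	} else {
		/* nothing changed: no priority adjustment needed */
	}

	printf("owner prio after walk: %d\n", owner.prio);
	return 0;
}

Reusing one top_waiter variable for both jobs is what made the old code
harder to follow; with the patch, prerequeue_top_waiter carries the
boost/deboost decision and top_waiter is only reloaded later for the
end-of-chain-walk check.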

