find_lock_lowest_rq() may or may not release the rq lock, but the current code does not distinguish between the two cases. If the rq lock was never released, the task state cannot have changed, so it is unnecessary to re-call pick_next_pushable_task().
Signed-off-by: Peng Hao <peng.h...@zte.com.cn> --- kernel/sched/rt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 2e2955a..4d7d322 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1719,6 +1719,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { struct rq *lowest_rq = NULL; int tries; + bool release_lock = false; int cpu; for (tries = 0; tries < RT_MAX_TRIES; tries++) { @@ -1741,6 +1742,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) /* if the prio of this runqueue changed, try again */ if (double_lock_balance(rq, lowest_rq)) { + release_lock = true; /* * We had to unlock the run queue. In * the mean time, task could have @@ -1768,6 +1770,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) lowest_rq = NULL; } + if (!lowest_rq && !release_lock) + lowest_rq = (void *) -1; return lowest_rq; } @@ -1863,6 +1867,9 @@ static int push_rt_task(struct rq *rq) goto retry; } + if (lowest_rq == (void *) -1) + goto out; + deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); -- 1.8.3.1