Currently, cpudl_find() returns the best CPU, i.e. the one with the maximum deadline; however, that value is already kept in later_mask, and the return value is no longer referred to directly.
Now, it's enough to return whether CPUs were found or not, like rt. Signed-off-by: Byungchul Park <byungchul.p...@lge.com> --- kernel/sched/cpudeadline.c | 13 +++++-------- kernel/sched/deadline.c | 6 +++--- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 6b67016..28d057f 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -119,18 +119,16 @@ static inline int cpudl_maximum(struct cpudl *cp) * @p: the task * @later_mask: a mask to fill in with the selected CPUs (or NULL) * - * Returns: int - best CPU (heap maximum if suitable) + * Returns: (int)bool - CPUs were found */ int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask) { - int best_cpu = -1; const struct sched_dl_entity *dl_se = &p->dl; if (later_mask && cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { - best_cpu = cpumask_any(later_mask); - goto out; + return 1; } else { u64 cpudl_dl; int cpudl_cpu; @@ -157,14 +155,13 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, if (cpudl_valid != IDX_INVALID && cpumask_test_cpu(cpudl_cpu, &p->cpus_allowed) && dl_time_before(dl_se->deadline, cpudl_dl)) { - best_cpu = cpudl_cpu; if (later_mask) - cpumask_set_cpu(best_cpu, later_mask); + cpumask_set_cpu(cpudl_cpu, later_mask); + return 1; } } -out: - return best_cpu; + return 0; } /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 9d997d9..0223694 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1107,7 +1107,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * let's hope p can move out. */ if (rq->curr->nr_cpus_allowed == 1 || - cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) + !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) return; /* @@ -1115,7 +1115,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * see if it is pushed or pulled somewhere else. 
*/ if (p->nr_cpus_allowed != 1 && - cpudl_find(&rq->rd->cpudl, p, NULL) != -1) + cpudl_find(&rq->rd->cpudl, p, NULL)) return; resched_curr(rq); @@ -1337,7 +1337,7 @@ static int find_later_rq(struct task_struct *task) * We have to consider system topology and task affinity * first, then we can look for a suitable cpu. */ - if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask) == -1) + if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) return -1; /* -- 1.9.1