tsk_nr_cpus_allowed() too is a pretty pointless wrapper: it is not
used consistently and it makes the code both harder to read and
longer.

So remove it - this also shrinks <linux/sched.h> a bit.
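
For illustration, a minimal sketch of what each conversion in the diff
below amounts to (call site taken from the rt.c hunks):

	/* before: trivial wrapper around a plain field access */
	if (tsk_nr_cpus_allowed(p) > 1)
		enqueue_pushable_task(rq, p);

	/* after: the open-coded field is shorter and just as clear */
	if (p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);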

Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Mike Galbraith <efa...@gmx.de>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/sched.h   |  5 -----
 kernel/sched/core.c     |  2 +-
 kernel/sched/deadline.c | 28 ++++++++++++++--------------
 kernel/sched/rt.c       | 24 ++++++++++++------------
 4 files changed, 27 insertions(+), 32 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 33ea1897bcbc..c570e5e35ab2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -962,11 +962,6 @@ struct task_struct {
  */
 };
 
-static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-{
-       return p->nr_cpus_allowed;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
        return task->pids[PIDTYPE_PID].pid;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4e12e7cedabb..9f3f8c2b6046 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1550,7 +1550,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
        lockdep_assert_held(&p->pi_lock);
 
-       if (tsk_nr_cpus_allowed(p) > 1)
+       if (p->nr_cpus_allowed > 1)
                cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
        else
                cpu = cpumask_any(&p->cpus_allowed);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8e4d6e4e3ccc..99b2c33a9fbc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -134,7 +134,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
        struct task_struct *p = dl_task_of(dl_se);
 
-       if (tsk_nr_cpus_allowed(p) > 1)
+       if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;
 
        update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
        struct task_struct *p = dl_task_of(dl_se);
 
-       if (tsk_nr_cpus_allowed(p) > 1)
+       if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;
 
        update_dl_migration(dl_rq);
@@ -958,7 +958,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_dl_entity(&p->dl, pi_se, flags);
 
-       if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1032,9 +1032,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
         * try to make it stay here, it might be important.
         */
        if (unlikely(dl_task(curr)) &&
-           (tsk_nr_cpus_allowed(curr) < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             !dl_entity_preempt(&p->dl, &curr->dl)) &&
-           (tsk_nr_cpus_allowed(p) > 1)) {
+           (p->nr_cpus_allowed > 1)) {
                int target = find_later_rq(p);
 
                if (target != -1 &&
@@ -1055,7 +1055,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
-       if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
+       if (rq->curr->nr_cpus_allowed == 1 ||
            cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
                return;
 
@@ -1063,7 +1063,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
-       if (tsk_nr_cpus_allowed(p) != 1 &&
+       if (p->nr_cpus_allowed != 1 &&
            cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
                return;
 
@@ -1178,7 +1178,7 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
        update_curr_dl(rq);
 
-       if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
+       if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1279,7 +1279,7 @@ static int find_later_rq(struct task_struct *task)
        if (unlikely(!later_mask))
                return -1;
 
-       if (tsk_nr_cpus_allowed(task) == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1;
 
        /*
@@ -1424,7 +1424,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 
        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);
 
        BUG_ON(!task_on_rq_queued(p));
        BUG_ON(!dl_task(p));
@@ -1463,7 +1463,7 @@ static int push_dl_task(struct rq *rq)
         */
        if (dl_task(rq->curr) &&
            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-           tsk_nr_cpus_allowed(rq->curr) > 1) {
+           rq->curr->nr_cpus_allowed > 1) {
                resched_curr(rq);
                return 0;
        }
@@ -1610,9 +1610,9 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
-           tsk_nr_cpus_allowed(p) > 1 &&
+           p->nr_cpus_allowed > 1 &&
            dl_task(rq->curr) &&
-           (tsk_nr_cpus_allowed(rq->curr) < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
                push_dl_tasks(rq);
        }
@@ -1726,7 +1726,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 
        if (rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
+               if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
                        queue_push_tasks(rq);
 #endif
                if (dl_task(rq->curr))
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index cbd356f63883..9f3e40226dec 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -335,7 +335,7 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total++;
-       if (tsk_nr_cpus_allowed(p) > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;
 
        update_rt_migration(rt_rq);
@@ -352,7 +352,7 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
        rt_rq->rt_nr_total--;
-       if (tsk_nr_cpus_allowed(p) > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;
 
        update_rt_migration(rt_rq);
@@ -1324,7 +1324,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_rt_entity(rt_se, flags);
 
-       if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1413,7 +1413,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
-           (tsk_nr_cpus_allowed(curr) < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio)) {
                int target = find_lowest_rq(p);
 
@@ -1437,7 +1437,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
-       if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
+       if (rq->curr->nr_cpus_allowed == 1 ||
            !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;
 
@@ -1445,7 +1445,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
-       if (tsk_nr_cpus_allowed(p) != 1
+       if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
 
@@ -1579,7 +1579,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
+       if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
 
@@ -1629,7 +1629,7 @@ static int find_lowest_rq(struct task_struct *task)
        if (unlikely(!lowest_mask))
                return -1;
 
-       if (tsk_nr_cpus_allowed(task) == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1761,7 +1761,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);
 
        BUG_ON(!task_on_rq_queued(p));
        BUG_ON(!rt_task(p));
@@ -2121,9 +2121,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
-           tsk_nr_cpus_allowed(p) > 1 &&
+           p->nr_cpus_allowed > 1 &&
            (dl_task(rq->curr) || rt_task(rq->curr)) &&
-           (tsk_nr_cpus_allowed(rq->curr) < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
 }
@@ -2196,7 +2196,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
         */
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
+               if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
                        queue_push_tasks(rq);
 #endif /* CONFIG_SMP */
                if (p->prio < rq->curr->prio)
-- 
2.7.4