Validate that:
 - __smp_call_single_queue() is only used on remote CPUs
 - task and rq CPUs match on activate_task()

(and always use activate_task()/deactivate_task() where we should, by
converting the open-coded dequeue/enqueue sequence in move_queued_task();
see the sketch below)
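
For context, a hedged sketch of the two helpers the move_queued_task()
hunks below switch to, simplified from this era of kernel/sched/core.c
(not the verbatim source): it shows that the open-coded p->on_rq writes
plus dequeue_task()/enqueue_task() being removed are what
deactivate_task()/activate_task() already do, with the new assertion on
the activate side.

	void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		/* A task taken off the rq without sleeping is migrating. */
		p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

		if (task_contributes_to_load(p))
			rq->nr_uninterruptible++;

		dequeue_task(rq, p, flags);
	}

	void activate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		/* New with this patch: the task must already point at this rq. */
		SCHED_WARN_ON(task_cpu(p) != cpu_of(rq));

		if (task_contributes_to_load(p))
			rq->nr_uninterruptible--;

		enqueue_task(rq, p, flags);

		p->on_rq = TASK_ON_RQ_QUEUED;
	}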

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 kernel/sched/core.c |    8 ++++----
 kernel/smp.c        |    2 ++
 2 files changed, 6 insertions(+), 4 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1311,6 +1311,8 @@ static inline void dequeue_task(struct r
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       SCHED_WARN_ON(task_cpu(p) != cpu_of(rq));
+
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;
 
@@ -1474,8 +1476,7 @@ static struct rq *move_queued_task(struc
 {
        lockdep_assert_held(&rq->lock);
 
-       WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-       dequeue_task(rq, p, DEQUEUE_NOCLOCK);
+       deactivate_task(rq, p, DEQUEUE_NOCLOCK);
        set_task_cpu(p, new_cpu);
        rq_unlock(rq, rf);
 
@@ -1483,8 +1484,7 @@ static struct rq *move_queued_task(struc
 
        rq_lock(rq, rf);
        BUG_ON(task_cpu(p) != new_cpu);
-       enqueue_task(rq, p, 0);
-       p->on_rq = TASK_ON_RQ_QUEUED;
+       activate_task(rq, p, 0);
        check_preempt_curr(rq, p, 0);
 
        return rq;
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,6 +135,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(cal
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
+       WARN_ON_ONCE(cpu == smp_processor_id());
+
        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of

