migrate_enable() can be called with current->state != TASK_RUNNING.
Avoid clobbering the existing state by using stop_one_cpu_nowait().
Since we're stopping the current CPU, we know that we won't get
past __schedule() until migration_cpu_stop() has run (at least up to
the point of migrating us to another CPU).
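
For context: the clobbering comes from the blocking wait.
stop_one_cpu() sleeps in wait_for_completion(), which puts the caller
in TASK_UNINTERRUPTIBLE and thus overwrites whatever state the task
had set before calling migrate_enable(). Simplified sketch of the
existing blocking helper (illustrative only, not part of this patch):

	int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
	{
		struct cpu_stop_done done;
		struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

		cpu_stop_init_done(&done, 1);
		if (!cpu_stop_queue_work(cpu, &work))
			return -ENOENT;
		/* sleeps in TASK_UNINTERRUPTIBLE, clobbering current->state */
		wait_for_completion(&done.completion);
		return done.ret;
	}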

Signed-off-by: Scott Wood <sw...@redhat.com>
---
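Notes (not intended for the commit log): the nowait variant works here
because arg lives on migrate_enable()'s stack and must stay valid until
the WARN_ON_ONCE() check below is passed, so migration_cpu_stop()
caches dest_cpu and orders that last read of *arg before publishing
arg->done. A minimal sketch of the handshake, using the same names as
the diff below:

	/* stopper side (migration_cpu_stop): done with *arg from here on */
	int dest_cpu = arg->dest_cpu;	/* last access to *arg */
	smp_mb();			/* order the read before the store */
	arg->done = true;

	/* caller side (migrate_enable), after __schedule(true): either the
	 * stopper ran, or it was never queued because the stopper for this
	 * CPU is disabled */
	WARN_ON_ONCE(!arg.done && !work.disabled);
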
 include/linux/stop_machine.h |  2 ++
 kernel/sched/core.c          | 22 +++++++++++++---------
 kernel/stop_machine.c        |  7 +++++--
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 6d3635c86dbe..82fc686ddd9e 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -26,6 +26,8 @@ struct cpu_stop_work {
        cpu_stop_fn_t           fn;
        void                    *arg;
        struct cpu_stop_done    *done;
+       /* Did not run due to disabled stopper; for nowait debug checks */
+       bool                    disabled;
 };
 
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5cb2a519b8bf..6383ade320f2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1051,6 +1051,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
+       bool done;
 };
 
 /*
@@ -1086,6 +1087,11 @@ static int migration_cpu_stop(void *data)
        struct task_struct *p = arg->task;
        struct rq *rq = this_rq();
        struct rq_flags rf;
+       int dest_cpu = arg->dest_cpu;
+
+       /* We don't look at arg after this point. */
+       smp_mb();
+       arg->done = true;
 
        /*
         * The original target CPU might have gone down and we might
@@ -1108,9 +1114,9 @@ static int migration_cpu_stop(void *data)
         */
        if (task_rq(p) == rq) {
                if (task_on_rq_queued(p))
-                       rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+                       rq = __migrate_task(rq, &rf, p, dest_cpu);
                else
-                       p->wake_cpu = arg->dest_cpu;
+                       p->wake_cpu = dest_cpu;
        }
        rq_unlock(rq, &rf);
        raw_spin_unlock(&p->pi_lock);
@@ -7392,6 +7398,7 @@ void migrate_enable(void)
        WARN_ON(smp_processor_id() != cpu);
        if (!is_cpu_allowed(p, cpu)) {
                struct migration_arg arg = { p };
+               struct cpu_stop_work work;
                struct rq_flags rf;
 
                rq = task_rq_lock(p, &rf);
@@ -7399,13 +7406,10 @@ void migrate_enable(void)
                arg.dest_cpu = select_fallback_rq(cpu, p);
                task_rq_unlock(rq, p, &rf);
 
-               preempt_lazy_enable();
-               preempt_enable();
-
-               sleeping_lock_inc();
-               stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-               sleeping_lock_dec();
-               return;
+               stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+                                   &arg, &work);
+               __schedule(true);
+               WARN_ON_ONCE(!arg.done && !work.disabled);
        }
 
 out:
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2b5a6754646f..fa53a472dd44 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -85,8 +85,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        enabled = stopper->enabled;
        if (enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
-       else if (work->done)
-               cpu_stop_signal_done(work->done);
+       else {
+               work->disabled = true;
+               if (work->done)
+                       cpu_stop_signal_done(work->done);
+       }
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
        wake_up_q(&wakeq);
-- 
1.8.3.1
