From: Peter Zijlstra <pet...@infradead.org>

[ Upstream commit ec618b84f6e15281cc3660664d34cd0dd2f2579e ]

  schedule()                            ttwu()
    deactivate_task();                    if (p->on_rq && ...) // false
                                            atomic_dec(&task_rq(p)->nr_iowait);
    if (prev->in_iowait)
      atomic_inc(&rq->nr_iowait);

This interleaving allows nr_iowait to be decremented before it gets
incremented, resulting in more dodgy IO-wait numbers than usual.
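
For reference, the counter touched here is what the reported iowait
numbers are summed from, via helpers along these lines (a rough sketch
of the mainline nr_iowait_cpu()/nr_iowait(); exact bodies vary by
kernel version):

  unsigned long nr_iowait_cpu(int cpu)
  {
          /* per-rq count of tasks sleeping in iowait on this CPU */
          return atomic_read(&cpu_rq(cpu)->nr_iowait);
  }

  unsigned long nr_iowait(void)
  {
          unsigned long i, sum = 0;

          /* system-wide iowait is the sum of the per-rq counters */
          for_each_possible_cpu(i)
                  sum += nr_iowait_cpu(i);

          return sum;
  }

If the decrement lands before the matching increment, the per-rq
atomic_t can go transiently negative, and the unsigned return above
then shows up as an absurdly large iowait count.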

Note that because we can now do ttwu_queue_wakelist() before
p->on_cpu==0, that is, before the previous schedule() has finished, we
lose the natural ordering and have to delay the decrement further.
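
With the decrement moved as in the diff below, the non-migrated wakeup
does it from ttwu_do_activate(), which runs with the task's rq lock
held, and the migrating wakeup does it only after
smp_cond_load_acquire(&p->on_cpu, !VAL), i.e. after the previous
schedule() has completed.  A rough sketch of the resulting ordering for
the non-migrated case (locking made explicit, otherwise simplified):

  schedule()                            ttwu()
    rq_lock(rq);
    deactivate_task();                    if (p->on_rq && ...) // false
    if (prev->in_iowait)
      atomic_inc(&rq->nr_iowait);
    rq_unlock(rq);
                                          ttwu_do_activate() // needs rq->lock
                                            if (p->in_iowait)
                                              atomic_dec(&task_rq(p)->nr_iowait);

Either way the atomic_dec() can no longer overtake the atomic_inc()
done under the rq lock in schedule().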

Fixes: c6e7bd7afaeb ("sched/core: Optimize ttwu() spinning on p->on_cpu")
Reported-by: Tejun Heo <t...@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Acked-by: Mel Gorman <mgor...@techsingularity.net>
Link: https://lkml.kernel.org/r/20201117093829.gd3121...@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 kernel/sched/core.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b1e0da56abcac..c4da7e17b9061 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2505,7 +2505,12 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 #ifdef CONFIG_SMP
        if (wake_flags & WF_MIGRATED)
                en_flags |= ENQUEUE_MIGRATED;
+       else
 #endif
+       if (p->in_iowait) {
+               delayacct_blkio_end(p);
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
 
        activate_task(rq, p, en_flags);
        ttwu_do_wakeup(rq, p, wake_flags, rf);
@@ -2892,11 +2897,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
                goto unlock;
 
-       if (p->in_iowait) {
-               delayacct_blkio_end(p);
-               atomic_dec(&task_rq(p)->nr_iowait);
-       }
-
 #ifdef CONFIG_SMP
        /*
         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
@@ -2967,6 +2967,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
+               if (p->in_iowait) {
+                       delayacct_blkio_end(p);
+                       atomic_dec(&task_rq(p)->nr_iowait);
+               }
+
                wake_flags |= WF_MIGRATED;
                psi_ttwu_dequeue(p);
                set_task_cpu(p, cpu);
-- 
2.27.0


