The following commit has been merged into the sched/core branch of tip:

Commit-ID:     126c2092e5c8b28623cb890cd2930aa292410676
Gitweb:        https://git.kernel.org/tip/126c2092e5c8b28623cb890cd2930aa292410676
Author:        Peter Zijlstra <pet...@infradead.org>
AuthorDate:    Tue, 26 May 2020 18:11:03 +02:00
Committer:     Ingo Molnar <mi...@kernel.org>
CommitterDate: Thu, 28 May 2020 10:54:16 +02:00

sched: Add rq::ttwu_pending

In preparation for removing rq->wake_list, replace the
!llist_empty(&rq->wake_list) checks with rq->ttwu_pending. This is not
fully equivalent, as the new variable is racy.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Link: https://lore.kernel.org/r/20200526161908.070399...@infradead.org
---
 kernel/sched/core.c  | 13 +++++++++++--
 kernel/sched/debug.c |  1 -
 kernel/sched/fair.c  |  2 +-
 kernel/sched/sched.h |  4 +++-
 4 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fa0d499..b71ed5e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2275,13 +2275,21 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 void sched_ttwu_pending(void)
 {
        struct rq *rq = this_rq();
-       struct llist_node *llist = llist_del_all(&rq->wake_list);
+       struct llist_node *llist;
        struct task_struct *p, *t;
        struct rq_flags rf;
 
+       llist = llist_del_all(&rq->wake_list);
        if (!llist)
                return;
 
+       /*
+        * rq::ttwu_pending is a racy indication of outstanding wakeups.
+        * The races are such that false-negatives are possible, since
+        * they are shorter lived than false-positives would be.
+        */
+       WRITE_ONCE(rq->ttwu_pending, 0);
+
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
@@ -2318,6 +2326,7 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags
 
        p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
 
+       WRITE_ONCE(rq->ttwu_pending, 1);
        if (llist_add(&p->wake_entry, &rq->wake_list)) {
                if (!set_nr_if_polling(rq->idle))
                        smp_call_function_single_async(cpu, &rq->wake_csd);
@@ -4705,7 +4714,7 @@ int idle_cpu(int cpu)
                return 0;
 
 #ifdef CONFIG_SMP
-       if (!llist_empty(&rq->wake_list))
+       if (rq->ttwu_pending)
                return 0;
 #endif
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 1c24a6b..36c5426 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -638,7 +638,6 @@ do {                                                                       \
 
        P(nr_running);
        P(nr_switches);
-       P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2890bd5..0ed04d2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8590,7 +8590,7 @@ static int idle_cpu_without(int cpu, struct task_struct *p)
         */
 
 #ifdef CONFIG_SMP
-       if (!llist_empty(&rq->wake_list))
+       if (rq->ttwu_pending)
                return 0;
 #endif
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 75b0629..c86fc94 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -895,7 +895,9 @@ struct rq {
        atomic_t                nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
-       unsigned long           nr_load_updates;
+#ifdef CONFIG_SMP
+       unsigned int            ttwu_pending;
+#endif
        u64                     nr_switches;
 
 #ifdef CONFIG_UCLAMP_TASK

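[Editor's note] For readers unfamiliar with the pattern, here is a minimal
userspace sketch (not kernel code) of the idea the patch introduces: a
lock-free list of pending wakeups paired with a racy "pending" hint that can
be read without any lock, as idle_cpu() now does. All names here (fake_rq,
queue_wakeup, drain_wakeups, cpu_looks_idle) are invented for illustration;
the kernel itself uses llist_add()/llist_del_all() and WRITE_ONCE()/READ_ONCE()
rather than C11 atomics.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int task_id;
};

struct fake_rq {
	_Atomic(struct node *) wake_list;	/* llist-style lock-free stack */
	atomic_int ttwu_pending;		/* racy hint, like rq->ttwu_pending */
};

/* Mirrors __ttwu_queue_wakelist(): set the hint, then publish the node. */
static void queue_wakeup(struct fake_rq *rq, struct node *n)
{
	/* Roughly WRITE_ONCE(rq->ttwu_pending, 1). */
	atomic_store_explicit(&rq->ttwu_pending, 1, memory_order_relaxed);

	/* Roughly llist_add(): push onto the lock-free list. */
	struct node *old = atomic_load_explicit(&rq->wake_list, memory_order_relaxed);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&rq->wake_list, &old, n,
							memory_order_release,
							memory_order_relaxed));
}

/* Mirrors sched_ttwu_pending(): take the whole list, then clear the hint. */
static struct node *drain_wakeups(struct fake_rq *rq)
{
	/* Roughly llist_del_all(): atomically steal every queued entry. */
	struct node *list = atomic_exchange_explicit(&rq->wake_list, NULL,
						     memory_order_acquire);
	if (!list)
		return NULL;

	/*
	 * Clearing the hint here means a concurrent reader can briefly see 0
	 * while entries are still being processed (a short-lived
	 * false-negative), which is the trade-off the patch accepts.
	 */
	atomic_store_explicit(&rq->ttwu_pending, 0, memory_order_relaxed);
	return list;
}

/* Mirrors the idle_cpu() change: a cheap, lockless "anything pending?" check. */
static int cpu_looks_idle(struct fake_rq *rq)
{
	return !atomic_load_explicit(&rq->ttwu_pending, memory_order_relaxed);
}

int main(void)
{
	struct fake_rq rq = { .wake_list = NULL, .ttwu_pending = 0 };
	struct node n1 = { .task_id = 1 }, n2 = { .task_id = 2 };

	queue_wakeup(&rq, &n1);
	queue_wakeup(&rq, &n2);
	printf("idle? %d\n", cpu_looks_idle(&rq));	/* 0: wakeups pending */

	for (struct node *p = drain_wakeups(&rq); p; p = p->next)
		printf("waking task %d\n", p->task_id);
	printf("idle? %d\n", cpu_looks_idle(&rq));	/* 1: nothing pending */
	return 0;
}

The design point the sketch tries to show: readers only need a hint, not an
exact answer, so a plain flag written before llist_add() and cleared after
llist_del_all() is enough, and it avoids touching the (soon to be removed)
wake_list from idle_cpu().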