Commit-ID:  82c65d60d64401aedc1006d6572469bbfdf148de
Gitweb:     http://git.kernel.org/tip/82c65d60d64401aedc1006d6572469bbfdf148de
Author:     Andy Lutomirski <[email protected]>
AuthorDate: Wed, 4 Jun 2014 10:31:16 -0700
Committer:  Ingo Molnar <[email protected]>
CommitDate: Thu, 5 Jun 2014 12:09:51 +0200

sched/idle: Clear polling before descheduling the idle thread

Currently, the only real guarantee provided by the polling bit is
that, if you hold rq->lock and the polling bit is set, then you can
set need_resched to force a reschedule.

The only reason the lock is needed is that the idle thread might not
be running at all when setting its need_resched bit, and rq->lock
keeps it pinned.

This is easy to fix: just clear the polling bit before scheduling.
Now the idle thread's polling bit is only ever set when
rq->curr == rq->idle.
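
As an illustration only (not kernel code), the waker-side logic that this
invariant enables can be modeled in userspace roughly like the sketch
below; struct idle_task, wake_up_idle_cpu_model() and send_ipi() are
invented names for this sketch, standing in for the real scheduler paths
that test TIF_POLLING_NRFLAG before deciding whether to send an IPI:

/*
 * Userspace model of the polling-bit invariant.  With "polling is only
 * ever set while rq->curr == rq->idle", the waker can skip the IPI
 * whenever it observes polling set, because the idle loop is guaranteed
 * to notice need_resched before (or instead of) going to sleep.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct idle_task {
	atomic_bool polling;		/* models TIF_POLLING_NRFLAG */
	atomic_bool need_resched;	/* models TIF_NEED_RESCHED   */
};

static void send_ipi(void)
{
	puts("IPI sent: target is not polling, kick it explicitly");
}

static void wake_up_idle_cpu_model(struct idle_task *t)
{
	atomic_store(&t->need_resched, true);

	/* Order the need_resched store before the polling check. */
	atomic_thread_fence(memory_order_seq_cst);

	if (!atomic_load(&t->polling))
		send_ipi();
	else
		puts("No IPI needed: idle task is polling and will see need_resched");
}

int main(void)
{
	struct idle_task t;

	atomic_init(&t.polling, true);
	atomic_init(&t.need_resched, false);

	wake_up_idle_cpu_model(&t);	/* polling set: no IPI      */

	atomic_store(&t.polling, false);
	wake_up_idle_cpu_model(&t);	/* polling clear: send IPI  */
	return 0;
}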

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: Rafael J. Wysocki <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: [email protected]
Link:       http://lkml.kernel.org/r/b2059fcb4c613d520cb503b6fad6e47033c7c203.1401902905.git.l...@amacapital.net
Signed-off-by: Ingo Molnar <[email protected]>
---
 kernel/sched/idle.c | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 25b9423..fe4b24b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -67,6 +67,10 @@ void __weak arch_cpu_idle(void)
  * cpuidle_idle_call - the main idle function
  *
  * NOTE: no locks or semaphores should be used here
+ *
+ * On archs that support TIF_POLLING_NRFLAG, this function is called
+ * with polling set, and it returns with polling set.  If it ever
+ * stops polling, it must clear the polling bit.
  */
 static void cpuidle_idle_call(void)
 {
@@ -175,10 +179,22 @@ exit_idle:
 
 /*
  * Generic idle loop implementation
+ *
+ * Called with polling cleared.
  */
 static void cpu_idle_loop(void)
 {
        while (1) {
+               /*
+                * If the arch has a polling bit, we maintain an invariant:
+                *
+                * Our polling bit is clear if we're not scheduled (i.e. if
+                * rq->curr != rq->idle).  This means that, if rq->idle has
+                * the polling bit set, then setting need_resched is
+                * guaranteed to cause the cpu to reschedule.
+                */
+
+               __current_set_polling();
                tick_nohz_idle_enter();
 
                while (!need_resched()) {
@@ -218,6 +234,15 @@ static void cpu_idle_loop(void)
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
+               __current_clr_polling();
+
+               /*
+                * We promise to reschedule if need_resched is set while
+                * polling is set.  That means that clearing polling
+                * needs to be visible before rescheduling.
+                */
+               smp_mb__after_atomic();
+
                schedule_preempt_disabled();
        }
 }
@@ -239,7 +264,6 @@ void cpu_startup_entry(enum cpuhp_state state)
         */
        boot_init_stack_canary();
 #endif
-       __current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
 }
--
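
Not part of the patch: a rough userspace model of the ordering that the
new comment above schedule_preempt_disabled() describes.  The names
deschedule_idle_model() and need_resched_flag are invented for this
sketch; the kernel's real primitives here are __current_clr_polling(),
smp_mb__after_atomic() and the need_resched check implied by the call to
schedule_preempt_disabled():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool polling;
static atomic_bool need_resched_flag;

static void deschedule_idle_model(void)
{
	/* Step 1: stop honouring the polling protocol. */
	atomic_store(&polling, false);

	/*
	 * Step 2: make the cleared polling bit visible before looking at
	 * need_resched.  This pairs with the waker, which sets
	 * need_resched and then tests polling to decide whether an IPI is
	 * required.  Without a full barrier on this side, the wakeup
	 * could be lost: the waker sees polling still set and skips the
	 * IPI, while we miss its need_resched store and go to sleep.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* Step 3: the scheduler's own need_resched check, modeled here. */
	if (atomic_load(&need_resched_flag))
		puts("need_resched seen: reschedule now, wakeup not lost");
	else
		puts("nothing pending: deschedule; later wakers see polling clear and send an IPI");
}

int main(void)
{
	atomic_init(&polling, true);
	atomic_init(&need_resched_flag, true);	/* a waker raced with us */
	deschedule_idle_model();
	return 0;
}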