From: Aubrey Li <aubrey...@linux.intel.com>

Short idle periods are common under some workloads, and the idle
entry and exit paths start to dominate there, so it is important to
optimize them. A fast idle routine, cpuidle_fast(), is introduced
here for short idle periods:
- tick nohz enter/exit are excluded
- RCU idle enter/exit are excluded, since the tick won't be stopped
- the idle governor is still leveraged, but the hardware idle-state
  selection done on some arches is excluded
- the default system idle routine is used
- any deferrable work is excluded
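
Note that this patch only splits the loop; do_idle() below still
enters cpuidle_generic() unconditionally. As a rough sketch only (the
helper name and policy are invented here for illustration and are not
part of this series), the eventual dispatch could look like:

/* HYPOTHETICAL helper, not in this series: predicts a short idle. */
static bool idle_period_is_short(void)
{
	/* e.g. compare a governor prediction against a threshold */
	return false;
}

static void do_idle(void)
{
	__current_set_polling();

	if (idle_period_is_short())
		cpuidle_fast();		/* skips nohz/RCU-idle/governor */
	else
		cpuidle_generic();	/* full path, tick may be stopped */

	__current_clr_polling();
}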
---
 kernel/sched/idle.c | 59 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 49 insertions(+), 10 deletions(-)
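
For context, default_idle_call(), which the fast loop invokes
directly instead of going through the cpuidle governor, looks roughly
like this at this point in the tree (paraphrased from
kernel/sched/idle.c; not part of this diff):

void default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		/* A reschedule raced with idle entry; don't halt. */
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();	/* re-enables interrupts */
		start_critical_timings();
	}
}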

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ac6d517..cf6c11f 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -202,22 +202,39 @@ static void cpuidle_idle_call(void)
 }
 
 /*
- * Generic idle loop implementation
- *
- * Called with polling cleared.
+ * Fast idle loop implementation
  */
-static void do_idle(void)
+static void cpuidle_fast(void)
 {
+       while (!need_resched()) {
+               check_pgt_cache();
+               rmb();
+
+               if (cpu_is_offline(smp_processor_id())) {
+                       cpuhp_report_idle_dead();
+                       arch_cpu_idle_dead();
+               }
+
+               local_irq_disable();
+               arch_cpu_idle_enter();
+
+               default_idle_call();
+
+               arch_cpu_idle_exit();
+       }
+
        /*
-        * If the arch has a polling bit, we maintain an invariant:
+        * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
+        * be set, propagate it into PREEMPT_NEED_RESCHED.
         *
-        * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
-        * rq->idle). This means that, if rq->idle has the polling bit set,
-        * then setting need_resched is guaranteed to cause the CPU to
-        * reschedule.
+        * This is required because for polling idle loops we will not have had
+        * an IPI to fold the state for us.
         */
+       preempt_set_need_resched();
+}
 
-       __current_set_polling();
+static void cpuidle_generic(void)
+{
        tick_nohz_idle_enter();
 
        while (!need_resched()) {
@@ -254,6 +271,28 @@ static void do_idle(void)
         */
        preempt_set_need_resched();
        tick_nohz_idle_exit();
+}
+
+/*
+ * Generic idle loop implementation
+ *
+ * Called with polling cleared.
+ */
+static void do_idle(void)
+{
+       /*
+        * If the arch has a polling bit, we maintain an invariant:
+        *
+        * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
+        * rq->idle). This means that, if rq->idle has the polling bit set,
+        * then setting need_resched is guaranteed to cause the CPU to
+        * reschedule.
+        */
+
+       __current_set_polling();
+
+       cpuidle_generic();
+
        __current_clr_polling();
 
        /*
-- 
2.7.4
