There is no real point in switching to dyntick-idle cputime accounting
mode if the tick is not actually stopped. Doing so only adds overhead,
notably the GTOD fetch (ktime_get()), on each idle exit and each idle
IRQ entry, for no benefit during short idle trips.

Therefore, only enter the dyntick cputime accounting mode once the tick
has actually been stopped, and leave it again when the tick gets
restarted.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
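For reviewers: a rough sketch of how tick_nohz_idle_exit() ends up
looking after this patch, reconstructed from the hunks below. A couple
of unchanged context lines between the two hunks touching this function
are not shown in the diff and are omitted here as well, so treat this as
illustrative only:

void tick_nohz_idle_exit(void)
{
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
        ktime_t now;

        local_irq_disable();

        WARN_ON_ONCE(ts->timer_expires_base);

        tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
        tick_nohz_clock_wakeup(ts);

        /*
         * Only fetch the GTOD and leave dyntick accounting mode when
         * the tick was really stopped; short idle trips skip both.
         */
        if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
                now = ktime_get();
                kcpustat_dyntick_stop(now);
                tick_nohz_idle_update_tick(ts, now);
        }

        local_irq_enable();
}
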
 kernel/time/tick-sched.c | 44 ++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cbd645fb8df6..05da130d257a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1135,8 +1135,10 @@ void tick_nohz_idle_stop_tick(void)
                ts->idle_sleeps++;
                ts->idle_expires = expires;
 
-               if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED))
+               if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
+                       kcpustat_dyntick_start(ts->idle_entrytime);
                        nohz_balance_enter_idle(cpu);
+               }
        } else {
                tick_nohz_retain_tick(ts);
        }
@@ -1177,7 +1179,6 @@ void tick_nohz_idle_enter(void)
        ts = this_cpu_ptr(&tick_cpu_sched);
        WARN_ON_ONCE(ts->timer_expires_base);
        ts->idle_entrytime = ktime_get();
-       kcpustat_dyntick_start(ts->idle_entrytime);
        tick_nohz_clock_sleep(ts);
 
        local_irq_enable();
@@ -1207,9 +1208,10 @@ void tick_nohz_irq_exit(void)
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
        if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) {
-               ts->idle_entrytime = ktime_get();
-               kcpustat_irq_exit(ts->idle_entrytime);
                tick_nohz_clock_sleep(ts);
+               ts->idle_entrytime = ktime_get();
+               if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
+                       kcpustat_irq_exit(ts->idle_entrytime);
        } else {
                tick_nohz_full_update_tick(ts);
        }
@@ -1310,8 +1312,11 @@ void tick_nohz_idle_restart_tick(void)
 {
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
-               tick_nohz_restart_sched_tick(ts, ktime_get());
+       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
+               ktime_t now = ktime_get();
+               kcpustat_dyntick_stop(now);
+               tick_nohz_restart_sched_tick(ts, now);
+       }
 }
 
 static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
@@ -1341,7 +1346,6 @@ static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
 void tick_nohz_idle_exit(void)
 {
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-       bool idle_active, tick_stopped;
        ktime_t now;
 
        local_irq_disable();
@@ -1350,18 +1354,13 @@ void tick_nohz_idle_exit(void)
        WARN_ON_ONCE(ts->timer_expires_base);
 
        tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
-       idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
-       tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
+       tick_nohz_clock_wakeup(ts);
 
-       if (idle_active || tick_stopped)
+       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
                now = ktime_get();
-
-       if (idle_active)
-               tick_nohz_clock_wakeup(ts);
-
-       if (tick_stopped)
+               kcpustat_dyntick_stop(now);
                tick_nohz_idle_update_tick(ts, now);
-       kcpustat_dyntick_stop(now);
+       }
 
        local_irq_enable();
 }
@@ -1415,15 +1414,13 @@ static inline void tick_nohz_irq_enter(void)
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
        ktime_t now;
 
-       if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
+       tick_nohz_clock_wakeup(ts);
+
+       if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED))
                return;
 
        now = ktime_get();
-
-       if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) {
-               tick_nohz_clock_wakeup(ts);
-               kcpustat_irq_enter(now);
-       }
+       kcpustat_irq_enter(now);
 
        /*
         * If all CPUs are idle we may need to update a stale jiffies value.
@@ -1432,8 +1429,7 @@ static inline void tick_nohz_irq_enter(void)
         * rare case (typically stop machine). So we must make sure we have a
         * last resort.
         */
-       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
-               tick_nohz_update_jiffies(now);
+       tick_nohz_update_jiffies(now);
 }
 
 #else
-- 
2.51.1

