The dyntick-idle cputime accounting always assumes that IRQ time
accounting is enabled and consequently stops accounting idle time
while dyntick-idle IRQs are serviced.

This doesn't mix well with disabled IRQ time accounting because idle
IRQs then become a cputime blind spot: the time elapsed in those IRQs
is accounted neither as idle nor as IRQ time. Moreover, IRQ time
accounting is disabled on most configurations, in which case the
overhead of pausing dyntick-idle accounting while in idle IRQs could
be avoided entirely.

Fix this by pausing dyntick-idle accounting during idle IRQs only if
either native vtime (which does its own IRQ time accounting) or
generic IRQ time accounting is enabled.

Also make sure that IRQ time accumulated during dyntick-idle is not
accidentally subtracted from later tick accounting, which never
included that time in the first place.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
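Below is a condensed, standalone userspace model of the two behavior
changes, for review purposes only; it is a sketch of the logic, not
kernel code. The kernel predicates irqtime_enabled(),
vtime_accounting_enabled_this_cpu() and
vtime_generic_enabled_this_cpu() are stubbed as plain booleans, and
the per-CPU irqtime state is collapsed into a single global struct.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vtime_generic; /* generic vtime accounts everything itself */
static bool irqtime_on;    /* CONFIG_IRQ_TIME_ACCOUNTING enabled */
static bool vtime_native;  /* native vtime, does its own IRQ accounting */

struct irqtime_model {
	bool idle_dyntick;     /* set while in dyntick-idle */
	uint64_t total;
	uint64_t tick_delta;   /* subtracted from the next tick */
};
static struct irqtime_model it;

/* Mirrors the irqtime_account_delta() change: IRQ time hit while in
 * dyntick-idle must not feed tick_delta, otherwise it would later be
 * subtracted from tick accounting that never included it. */
static void account_irq_delta(uint64_t delta)
{
	it.total += delta;
	if (!it.idle_dyntick)
		it.tick_delta += delta;
}

/* Mirrors the new kcpustat_irq_enter()/kcpustat_irq_exit() condition:
 * pause idle accounting across an IRQ only if some IRQ time accounting
 * scheme credits that time elsewhere; otherwise keep counting it as
 * idle so the IRQ doesn't become a cputime blind spot. */
static bool idle_pause_on_irq(void)
{
	return !vtime_generic && (irqtime_on || vtime_native);
}

int main(void)
{
	irqtime_on = false;
	printf("irqtime off: pause idle during IRQ? %s\n",
	       idle_pause_on_irq() ? "yes" : "no");  /* no */

	irqtime_on = true;
	printf("irqtime on:  pause idle during IRQ? %s\n",
	       idle_pause_on_irq() ? "yes" : "no");  /* yes */

	/* In dyntick-idle, IRQ time goes to total but not tick_delta. */
	it.idle_dyntick = true;
	account_irq_delta(1000);
	printf("dyntick-idle IRQ: total=%llu tick_delta=%llu\n",
	       (unsigned long long)it.total,
	       (unsigned long long)it.tick_delta);   /* 1000, 0 */

	return 0;
}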
 kernel/sched/cputime.c | 24 +++++++++++++++++++++---
 kernel/sched/sched.h   |  1 +
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f0620b429698..3dadfaa92b27 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -45,7 +45,8 @@ static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
        u64_stats_update_begin(&irqtime->sync);
        cpustat[idx] += delta;
        irqtime->total += delta;
-       irqtime->tick_delta += delta;
+       if (!irqtime->idle_dyntick)
+               irqtime->tick_delta += delta;
        u64_stats_update_end(&irqtime->sync);
 }
 
@@ -80,6 +81,16 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
                irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 }
 
+static inline void irqtime_dyntick_start(void)
+{
+       __this_cpu_write(cpu_irqtime.idle_dyntick, true);
+}
+
+static inline void irqtime_dyntick_stop(void)
+{
+       __this_cpu_write(cpu_irqtime.idle_dyntick, false);
+}
+
 static u64 irqtime_tick_accounted(u64 maxtime)
 {
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
@@ -93,6 +104,9 @@ static u64 irqtime_tick_accounted(u64 maxtime)
 
 #else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
 
+static inline void irqtime_dyntick_start(void) { }
+static inline void irqtime_dyntick_stop(void) { }
+
 static u64 irqtime_tick_accounted(u64 dummy)
 {
        return 0;
@@ -443,6 +457,7 @@ void kcpustat_dyntick_stop(ktime_t now)
                WARN_ON_ONCE(!kc->idle_dyntick);
                kcpustat_idle_stop(kc, now);
                kc->idle_dyntick = false;
+               irqtime_dyntick_stop();
                vtime_dyntick_stop();
                steal_account_process_time(ULONG_MAX);
        }
@@ -454,6 +469,7 @@ void kcpustat_dyntick_start(ktime_t now)
 
        if (!vtime_generic_enabled_this_cpu()) {
                vtime_dyntick_start();
+               irqtime_dyntick_start();
                kc->idle_dyntick = true;
                kcpustat_idle_start(kc, now);
        }
@@ -463,7 +479,8 @@ void kcpustat_irq_enter(ktime_t now)
 {
        struct kernel_cpustat *kc = kcpustat_this_cpu;
 
-       if (!vtime_generic_enabled_this_cpu())
+       if (!vtime_generic_enabled_this_cpu() &&
+           (irqtime_enabled() || vtime_accounting_enabled_this_cpu()))
                kcpustat_idle_stop(kc, now);
 }
 
@@ -471,7 +488,8 @@ void kcpustat_irq_exit(ktime_t now)
 {
        struct kernel_cpustat *kc = kcpustat_this_cpu;
 
-       if (!vtime_generic_enabled_this_cpu())
+       if (!vtime_generic_enabled_this_cpu() &&
+           (irqtime_enabled() || vtime_accounting_enabled_this_cpu()))
                kcpustat_idle_start(kc, now);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d30cca6870f5..cf677ff12b10 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3307,6 +3307,7 @@ static inline void sched_core_tick(struct rq *rq) { }
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 
 struct irqtime {
+       bool                    idle_dyntick;
        u64                     total;
        u64                     tick_delta;
        u64                     irq_start_time;
-- 
2.51.1

