get_cpu_idle/iowait_time_us() may ultimately fail if generic vtime
accounting is enabled.

The ad-hoc replacement solution by cpufreq is to compute jiffies minus
the whole busy cputime. Although the intention is to provide a coherent
low-resolution estimation of the idle and iowait time, the
implementation is buggy because jiffies don't start at 0.

Enhance instead get_cpu_[idle|iowait]_time_us() to provide support for
vtime generic accounting.

Signed-off-by: Frederic Weisbecker <[email protected]>
---
 drivers/cpufreq/cpufreq.c   | 29 +----------------------------
 include/linux/kernel_stat.h |  3 +++
 include/linux/tick.h        |  4 ----
 kernel/sched/cputime.c      | 14 ++++++++++----
 4 files changed, 14 insertions(+), 36 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4472bb1ec83c..ecb9634cd06b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -130,38 +130,11 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-       struct kernel_cpustat kcpustat;
-       u64 cur_wall_time;
-       u64 idle_time;
-       u64 busy_time;
-
-       cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
-
-       kcpustat_cpu_fetch(&kcpustat, cpu);
-
-       busy_time = kcpustat.cpustat[CPUTIME_USER];
-       busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
-       busy_time += kcpustat.cpustat[CPUTIME_IRQ];
-       busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
-       busy_time += kcpustat.cpustat[CPUTIME_STEAL];
-       busy_time += kcpustat.cpustat[CPUTIME_NICE];
-
-       idle_time = cur_wall_time - busy_time;
-       if (wall)
-               *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
-
-       return div_u64(idle_time, NSEC_PER_USEC);
-}
-
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 {
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 
-       if (idle_time == -1ULL)
-               return get_cpu_idle_time_jiffy(cpu, wall);
-       else if (!io_busy)
+       if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);
 
        return idle_time;
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index e1efd26e56f0..e59916477075 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -133,6 +133,9 @@ static inline bool kcpustat_idle_dyntick(void)
 }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
+
 /* Fetch cputime values when vtime is disabled on a CPU */
 static inline u64 kcpustat_field_default(enum cpu_usage_stat usage, int cpu)
 {
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ac76ae9fa36d..1296cba67bee 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -138,8 +138,6 @@ extern bool tick_nohz_idle_got_tick(void);
 extern ktime_t tick_nohz_get_next_hrtimer(void);
 extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
-extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
-extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
 #define tick_nohz_enabled (0)
 static inline int tick_nohz_tick_stopped(void) { return 0; }
@@ -160,8 +158,6 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
        *delta_next = TICK_NSEC;
        return *delta_next;
 }
-static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
-static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #endif /* !CONFIG_NO_HZ_COMMON */
 
 /*
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9906abe5d7bc..f0620b429698 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -511,6 +511,13 @@ u64 kcpustat_field_iowait(int cpu)
                                      nr_iowait_cpu(cpu), ktime_get());
 }
 EXPORT_SYMBOL_GPL(kcpustat_field_iowait);
+#else
+static u64 kcpustat_field_dyntick(int cpu, enum cpu_usage_stat idx,
+                                 bool compute_delta, ktime_t now)
+{
+       return kcpustat_cpu(cpu).cpustat[idx];
+}
+#endif /* CONFIG_NO_HZ_COMMON */
 
 static u64 get_cpu_sleep_time_us(int cpu, enum cpu_usage_stat idx,
                                 bool compute_delta, u64 *last_update_time)
@@ -519,7 +526,7 @@ static u64 get_cpu_sleep_time_us(int cpu, enum cpu_usage_stat idx,
        u64 res;
 
        if (vtime_generic_enabled_cpu(cpu))
-               return -1;
+               res = kcpustat_field(idx, cpu);
        else
                res = kcpustat_field_dyntick(cpu, idx, compute_delta, now);
 
@@ -544,7 +551,7 @@ static u64 get_cpu_sleep_time_us(int cpu, enum cpu_usage_stat idx,
  * This time is measured via accounting rather than sampling,
  * and is as accurate as ktime_get() is.
  *
- * Return: -1 if generic vtime is enabled, else total idle time of the @cpu
+ * Return: total idle time of the @cpu
  */
 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
@@ -568,7 +575,7 @@ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
  * This time is measured via accounting rather than sampling,
  * and is as accurate as ktime_get() is.
  *
- * Return: -1 if generic vtime is enabled, else total iowait time of @cpu
+ * Return: total iowait time of @cpu
  */
 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 {
@@ -576,7 +583,6 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
                                     nr_iowait_cpu(cpu), last_update_time);
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
-#endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Use precise platform statistics if available:
-- 
2.51.1


Reply via email to