Measure latencies caused by delayed timer offsets, in nanoseconds.
It shows the latency captured due to a delayed timer expiry event. This
happens, for example, when a timer misses its deadline because
interrupts were disabled. A process that is scheduled as a result of
the timer expiration suffers this latency. It is used to calculate the
total wakeup latency of a process, which is the sum of the delayed
timer offset and the wakeup latency.

[
Initial work and idea by Carsten
Link: 
https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit/?h=v3.14-rt-rebase&id=56d50cc34943bbba12b8c5942ee1ae3b29f73acb
]

Cc: Carsten Emde <c.e...@osadl.org>
Signed-off-by: Binoy Jayan <binoy.ja...@linaro.org>
---
 include/linux/hrtimer.h |  4 ++++
 include/linux/sched.h   |  3 +++
 kernel/time/Kconfig     |  8 ++++++++
 kernel/time/hrtimer.c   | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 62 insertions(+)

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80..05d8086 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -90,6 +90,7 @@ enum hrtimer_restart {
  * @is_rel:    Set if the timer was armed relative
  * @start_pid:  timer statistics field to store the pid of the task which
  *             started the timer
+ * @tim_expiry: hrtimer expiry time or 0 in case already expired
  * @start_site:        timer statistics field to store the site where the timer
  *             was started
  * @start_comm: timer statistics field to store the name of the process which
@@ -104,6 +105,9 @@ struct hrtimer {
        struct hrtimer_clock_base       *base;
        u8                              state;
        u8                              is_rel;
+#ifdef CONFIG_TRACE_DELAYED_TIMER_OFFSETS
+       ktime_t                         tim_expiry;
+#endif
 #ifdef CONFIG_TIMER_STATS
        int                             start_pid;
        void                            *start_site;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62c68e5..7bf67f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1891,6 +1891,9 @@ struct task_struct {
        /* bitmask and counter of trace recursion */
        unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
+#ifdef CONFIG_TRACE_DELAYED_TIMER_OFFSETS
+       long timer_offset;
+#endif /* CONFIG_TRACE_DELAYED_TIMER_OFFSETS */
 #ifdef CONFIG_KCOV
        /* Coverage collection mode enabled for this task (0 if disabled). */
        enum kcov_mode kcov_mode;
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 4008d9f..de4793c 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -193,5 +193,13 @@ config HIGH_RES_TIMERS
          hardware is not capable then this option only increases
          the size of the kernel image.
 
+config TRACE_DELAYED_TIMER_OFFSETS
+       depends on HIGH_RES_TIMERS
+       select GENERIC_TRACER
+       bool "Delayed Timer Offsets"
+       help
+         Capture offsets of delayed hrtimer in nanoseconds. It is used
+         to construct wakeup latency histogram.
+
 endmenu
 endif
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9ba7c82..7048f86 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -56,6 +56,8 @@
 
 #include "tick-internal.h"
 
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
 /*
  * The timer bases:
  *
@@ -960,6 +962,47 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
        return tim;
 }
 
+#ifdef CONFIG_TRACE_DELAYED_TIMER_OFFSETS
+static inline void latency_hrtimer_timing_start(struct hrtimer *timer,
+                                        struct hrtimer_clock_base *new_base,
+                                        ktime_t tim)
+{
+       ktime_t now = new_base->get_time();
+
+       if (ktime_to_ns(tim) < ktime_to_ns(now))
+               timer->tim_expiry = now;
+       else
+               timer->tim_expiry = ktime_set(0, 0);
+}
+
+static inline void latency_hrtimer_timing_stop(struct hrtimer *timer,
+                                               ktime_t basenow)
+{
+       long latency;
+       struct task_struct *task;
+
+       latency = ktime_to_ns(ktime_sub(basenow,
+                             ktime_to_ns(timer->tim_expiry) ?
+                             timer->tim_expiry : hrtimer_get_expires(timer)));
+       task = timer->function == hrtimer_wakeup ?
+                       container_of(timer, struct hrtimer_sleeper,
+                                    timer)->task : NULL;
+       if (task && latency > 0)
+               task->timer_offset = latency;
+}
+#else
+static inline void latency_hrtimer_timing_start(struct hrtimer *timer,
+                                        struct hrtimer_clock_base *new_base,
+                                        ktime_t tim)
+{
+}
+static inline void latency_hrtimer_timing_stop(struct hrtimer *timer,
+                                               ktime_t basenow)
+{
+}
+
+#endif
+
 /**
  * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:     the timer to be added
@@ -992,6 +1035,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
        timer_stats_hrtimer_set_start_info(timer);
 
+       latency_hrtimer_timing_start(timer, new_base, tim);
+
        leftmost = enqueue_hrtimer(timer, new_base);
        if (!leftmost)
                goto unlock;
@@ -1284,6 +1329,8 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 
                        timer = container_of(node, struct hrtimer, node);
 
+                       latency_hrtimer_timing_stop(timer, basenow);
+
                        /*
                         * The immediate goal for using the softexpires is
                         * minimizing wakeups, not running timers at the
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project

Reply via email to