Setting the realtime clock triggers an IPI to all CPUs to reprogram
hrtimers.

However, only realtime and TAI clocks have their offsets updated
(and therefore potentially require a reprogram).

Check whether a CPU has only monotonic active timers, and in that case
update the realtime and TAI base offsets remotely, skipping the IPI to that CPU.

This reduces interruptions to latency sensitive applications.

Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

---

v2:
   - Only REALTIME and TAI bases are affected by offset-to-monotonic changes (Thomas).
   - Don't special case nohz_full CPUs (Thomas).
   

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5c9d968187ae..be21b85c679d 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -871,6 +871,28 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
        tick_program_event(expires, 1);
 }
 
+#define CLOCK_SET_BASES ((1U << HRTIMER_BASE_REALTIME)|                \
+                        (1U << HRTIMER_BASE_REALTIME_SOFT)|    \
+                        (1U << HRTIMER_BASE_TAI)|              \
+                        (1U << HRTIMER_BASE_TAI_SOFT))
+
+static bool need_reprogram_timer(struct hrtimer_cpu_base *cpu_base)
+{
+       unsigned int active = 0;
+
+       if (cpu_base->softirq_activated)
+               return true;
+
+       active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+
+       active = active | (cpu_base->active_bases & HRTIMER_ACTIVE_HARD);
+
+       if ((active & CLOCK_SET_BASES) == 0)
+               return false;
+
+       return true;
+}
+
 /*
  * Clock realtime was set
  *
@@ -885,9 +907,31 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 void clock_was_set(void)
 {
 #ifdef CONFIG_HIGH_RES_TIMERS
-       /* Retrigger the CPU local events everywhere */
-       on_each_cpu(retrigger_next_event, NULL, 1);
+       cpumask_var_t mask;
+       int cpu;
+
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+               on_each_cpu(retrigger_next_event, NULL, 1);
+               goto set_timerfd;
+       }
+
+       /* Avoid interrupting CPUs if possible */
+       preempt_disable();
+       for_each_online_cpu(cpu) {
+               unsigned long flags;
+               struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
+
+               raw_spin_lock_irqsave(&cpu_base->lock, flags);
+               if (need_reprogram_timer(cpu_base))
+                       cpumask_set_cpu(cpu, mask);
+               raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+       }
+
+       smp_call_function_many(mask, retrigger_next_event, NULL, 1);
+       preempt_enable();
+       free_cpumask_var(mask);
 #endif
+set_timerfd:
        timerfd_clock_was_set();
 }

Reply via email to