If the timekeeping CPU is scheduled out long enough by a hypervisor the
clocksource delta multiplication can overflow and as a result time can go
backwards. That's insane to begin with, but people already triggered a
signed multiplication overflow, so an unsigned overflow is not necessarily
impossible.

Implement optional 128bit math which can be selected by a config option.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
 kernel/time/Kconfig       |   15 +++++++++++++++
 kernel/time/timekeeping.c |   38 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 51 insertions(+), 2 deletions(-)

--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -51,6 +51,21 @@ config GENERIC_CLOCKEVENTS_MIN_ADJUST
 config GENERIC_CMOS_UPDATE
        bool
 
+config TIMEKEEPING_USE_128BIT_MATH
+       bool "Enable 128 bit math in the timekeeping hotpath"
+       default n
+       depends on !ARCH_USES_GETTIMEOFFSET && EXPERT
+       help
+
+         If VMs get scheduled out for a long time then the clocksource
+         delta to nanoseconds conversion in timekeeping can overflow the
+         64bit multiplication. As a result time going backwards might be
+         observed.
+
+         Enable this only if you want to support insane setups with
+         massive overcommitment as this introduces overhead into the
+         timekeeping hotpath.
+
 if GENERIC_CLOCKEVENTS
 menu "Timers subsystem"
 
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -298,8 +298,41 @@ u32 (*arch_gettimeoffset)(void) = defaul
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
-                                         cycle_t delta)
+/*
+ * Enabled when timekeeping is supposed to deal with virtualization keeping
+ * VMs scheduled out long enough that the 64 * 32 bit multiplication in
+ * timekeeping_delta_to_ns() overflows 64bit.
+ */
+#ifdef CONFIG_TIMEKEEPING_USE_128BIT_MATH
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+{
+       unsigned __int128 nsec;
+
+       nsec = ((unsigned __int128)delta * tkr->mult) + tkr->xtime_nsec;
+       return (u64) (nsec >> tkr->shift);
+}
+#else
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+{
+       u32 dh, dl;
+       u64 nsec;
+
+       dl = delta;
+       dh = delta >> 32;
+
+       nsec = ((u64)dl * tkr->mult) + tkr->xtime_nsec;
+       nsec >>= tkr->shift;
+       if (unlikely(dh))
+               nsec += ((u64)dh * tkr->mult) << (32 - tkr->shift);
+       return nsec;
+}
+#endif
+
+#else /* CONFIG_TIMEKEEPING_USE_128BIT_MATH */
+
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
 {
        u64 nsec;
 
@@ -309,6 +342,7 @@ static inline u64 timekeeping_delta_to_n
        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + arch_gettimeoffset();
 }
+#endif
 
 static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
 {


Reply via email to