For the case !CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE, forgo overflow
protection in the range (mask >> 1) < delta <= mask, and always
interpret a delta in that range as an inconsistency between CPU clock
values. That allows slightly neater code, and it is on a slow path so
has no effect on performance.
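
For illustration, here is a minimal user-space sketch of the check (not
part of the patch; the mask value and helper name are invented for this
example):

  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical 32-bit clocksource mask, for illustration only */
  #define MASK 0xffffffffULL

  /*
   * Nonzero when the MSB of the mask is set in the masked delta,
   * i.e. when (MASK >> 1) < delta <= MASK. Such a delta is treated
   * as negative motion (a CPU clock inconsistency) rather than as a
   * large forward delta needing multiplication-overflow protection.
   */
  static int negative_motion(uint64_t cycles, uint64_t last)
  {
          uint64_t delta = (cycles - last) & MASK;

          return (delta & ~(MASK >> 1)) != 0;
  }

  int main(void)
  {
          /* A small backward step wraps to a huge masked delta */
          printf("%d\n", negative_motion(100, 200));  /* 1 */
          /* An ordinary forward step stays below MASK >> 1 */
          printf("%d\n", negative_motion(200, 100));  /* 0 */
          return 0;
  }

The point of the MSB test is that a delta with the top bit of the mask
set is far more likely to be a small backward step that wrapped around
than a genuine forward step covering more than half the counter's range.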

Suggested-by: Thomas Gleixner <t...@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hun...@intel.com>
---
 kernel/time/timekeeping.c | 31 +++++++++++++------------------
 1 file changed, 13 insertions(+), 18 deletions(-)

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 111dfdbd488f..4e18db1819f8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -266,17 +266,14 @@ static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
         * Try to catch underflows by checking if we are seeing small
         * mask-relative negative values.
         */
-       if (unlikely((~delta & mask) < (mask >> 3))) {
+       if (unlikely((~delta & mask) < (mask >> 3)))
                tk->underflow_seen = 1;
-               now = last;
-       }
 
-       /* Cap delta value to the max_cycles values to avoid mult overflows */
-       if (unlikely(delta > max)) {
+       /* Check for multiplication overflows */
+       if (unlikely(delta > max))
                tk->overflow_seen = 1;
-               now = last + max;
-       }
 
+       /* timekeeping_cycles_to_ns() handles both underflow and overflow */
        return timekeeping_cycles_to_ns(tkr, now);
 }
 #else
@@ -375,19 +372,17 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c
        u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
        /*
-        * This detects the case where the delta overflows the multiplication
-        * with tkr->mult.
+        * This detects both negative motion and the case where the delta
+        * overflows the multiplication with tkr->mult.
         */
        if (unlikely(delta > tkr->clock->max_cycles)) {
-               if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-                       /*
-                        * Handle clocksource inconsistency between CPUs to prevent
-                        * time from going backwards by checking for the MSB of the
-                        * mask being set in the delta.
-                        */
-                       if (unlikely(delta & ~(mask >> 1)))
-                               return tkr->xtime_nsec >> tkr->shift;
-               }
+               /*
+                * Handle clocksource inconsistency between CPUs to prevent
+                * time from going backwards by checking for the MSB of the
+                * mask being set in the delta.
+                */
+               if (delta & ~(mask >> 1))
+                       return tkr->xtime_nsec >> tkr->shift;
 
                return delta_to_ns_safe(tkr, delta);
        }
-- 
2.34.1
