This will allow clocks with different mult and shift values,
e.g. CLOCK_MONOTONIC_RAW, to be supported in the vDSO.

The coarse clocks do not require this data, so the values are not
copied for those clocks.

One could add potential new values of mult and shift alongside the
existing values in struct vsyscall_gtod_data, but it seems more
natural to group them with the actual clock data in the basetime array
at the expense of a few more cycles in update_vsyscall().

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Andy Lutomirski <l...@kernel.org>
Signed-off-by: Huw Davies <h...@codeweavers.com>
---
 arch/x86/entry/vdso/vclock_gettime.c    | 4 ++--
 arch/x86/entry/vsyscall/vsyscall_gtod.c | 8 ++++++--
 arch/x86/include/asm/vgtod.h            | 6 +++---
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 007b3fe9d727..a4199b846d77 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -153,8 +153,8 @@ notrace static int do_hres(clockid_t clk, struct timespec *ts)
                if (unlikely((s64)cycles < 0))
                        return vdso_fallback_gettime(clk, ts);
                if (cycles > last)
-                       ns += (cycles - last) * gtod->mult;
-               ns >>= gtod->shift;
+                       ns += (cycles - last) * base->mult;
+               ns >>= base->shift;
                sec = base->sec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
 
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
index e4ee83018279..ddc6a71df87c 100644
--- a/arch/x86/entry/vsyscall/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
@@ -43,16 +43,18 @@ void update_vsyscall(struct timekeeper *tk)
        /* copy vsyscall data */
        vdata->vclock_mode      = vclock_mode;
        vdata->cycle_last       = tk->tkr_mono.cycle_last;
-       vdata->mult             = tk->tkr_mono.mult;
-       vdata->shift            = tk->tkr_mono.shift;
 
        base = &vdata->basetime[CLOCK_REALTIME];
        base->sec = tk->xtime_sec;
        base->nsec = tk->tkr_mono.xtime_nsec;
+       base->mult = tk->tkr_mono.mult;
+       base->shift = tk->tkr_mono.shift;
 
        base = &vdata->basetime[CLOCK_TAI];
        base->sec = tk->xtime_sec + (s64)tk->tai_offset;
        base->nsec = tk->tkr_mono.xtime_nsec;
+       base->mult = tk->tkr_mono.mult;
+       base->shift = tk->tkr_mono.shift;
 
        base = &vdata->basetime[CLOCK_MONOTONIC];
        base->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
@@ -63,6 +65,8 @@ void update_vsyscall(struct timekeeper *tk)
                base->sec++;
        }
        base->nsec = nsec;
+       base->mult = tk->tkr_mono.mult;
+       base->shift = tk->tkr_mono.shift;
 
        base = &vdata->basetime[CLOCK_REALTIME_COARSE];
        base->sec = tk->xtime_sec;
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index daf69a25e46b..ae0d76491595 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -20,11 +20,13 @@ typedef unsigned long gtod_long_t;
  * clocks, this encodes the actual time.
  *
  * To confuse the reader, for high-resolution clocks, nsec is left-shifted
- * by vsyscall_gtod_data.shift.
+ * by shift.
  */
 struct vgtod_ts {
        u64             sec;
        u64             nsec;
+       u32             mult;
+       u32             shift;
 };
 
 #define VGTOD_BASES    (CLOCK_TAI + 1)
@@ -40,8 +42,6 @@ struct vsyscall_gtod_data {
 
        int             vclock_mode;
        u64             cycle_last;
-       u32             mult;
-       u32             shift;
 
        struct vgtod_ts basetime[VGTOD_BASES];
 
-- 
2.17.1

Reply via email to