Commit-ID:  3e89bf35ebf59c12e8c1476f6681fae0ebdcb2a7
Gitweb:     https://git.kernel.org/tip/3e89bf35ebf59c12e8c1476f6681fae0ebdcb2a7
Author:     Thomas Gleixner <t...@linutronix.de>
AuthorDate: Mon, 17 Sep 2018 14:45:43 +0200
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Thu, 4 Oct 2018 23:00:27 +0200

x86/vdso: Move cycle_last handling into the caller

Dereferencing gtod->cycle_last all over the place and doing the cycles <
last comparison in the vclock read functions generates horrible code. Doing
it at the call site is much better and gains a few cycles both for TSC and
pvclock.
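
To make the shape of the change concrete, here is a minimal sketch
(illustrative only; read_hw_counter() is a placeholder for rdtsc_ordered(),
__pvclock_read_cycles() and friends). Before, every vclock read function
carried its own copy of the comparison:

	static notrace u64 vread_xxx(void)
	{
		u64 ret = read_hw_counter();	/* e.g. rdtsc_ordered() */
		u64 last = gtod->cycle_last;

		if (likely(ret >= last))
			return ret;
		return last;	/* never report a value behind cycle_last */
	}

After the patch the read functions return the raw counter value and
do_hres() applies the comparison once for all clock sources:

	last = gtod->cycle_last;
	cycles = vgetcyc(gtod->vclock_mode);
	if (cycles > last)
		ns += (cycles - last) * gtod->mult;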

Caveat: This adds the comparison to the hyperv vclock as well, but I have
no way to test that.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Acked-by: Andy Lutomirski <l...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Matt Rickard <m...@softrans.com.au>
Cc: Stephen Boyd <sb...@kernel.org>
Cc: John Stultz <john.stu...@linaro.org>
Cc: Florian Weimer <fwei...@redhat.com>
Cc: "K. Y. Srinivasan" <k...@microsoft.com>
Cc: Vitaly Kuznetsov <vkuzn...@redhat.com>
Cc: de...@linuxdriverproject.org
Cc: virtualizat...@lists.linux-foundation.org
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Arnd Bergmann <a...@arndb.de>
Cc: Juergen Gross <jgr...@suse.com>
Link: https://lkml.kernel.org/r/20180917130707.741440...@linutronix.de

---
 arch/x86/entry/vdso/vclock_gettime.c | 39 +++++++-----------------------------
 1 file changed, 7 insertions(+), 32 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 40105024a210..b7ccbff26a3f 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -77,9 +77,8 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 static notrace u64 vread_pvclock(void)
 {
        const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-       u64 ret;
-       u64 last;
        u32 version;
+       u64 ret;
 
        /*
         * Note: The kernel and hypervisor must guarantee that cpu ID
@@ -112,13 +111,7 @@ static notrace u64 vread_pvclock(void)
                ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
        } while (pvclock_read_retry(pvti, version));
 
-       /* refer to vread_tsc() comment for rationale */
-       last = gtod->cycle_last;
-
-       if (likely(ret >= last))
-               return ret;
-
-       return last;
+       return ret;
 }
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
@@ -131,30 +124,10 @@ static notrace u64 vread_hvclock(void)
 }
 #endif
 
-notrace static u64 vread_tsc(void)
-{
-       u64 ret = (u64)rdtsc_ordered();
-       u64 last = gtod->cycle_last;
-
-       if (likely(ret >= last))
-               return ret;
-
-       /*
-        * GCC likes to generate cmov here, but this branch is extremely
-        * predictable (it's just a function of time and the likely is
-        * very likely) and there's a data dependence, so force GCC
-        * to generate a branch instead.  I don't barrier() because
-        * we don't actually need a barrier, and if this function
-        * ever gets inlined it will generate worse code.
-        */
-       asm volatile ("");
-       return last;
-}
-
 notrace static inline u64 vgetcyc(int mode)
 {
        if (mode == VCLOCK_TSC)
-               return vread_tsc();
+               return (u64)rdtsc_ordered();
 #ifdef CONFIG_PARAVIRT_CLOCK
        else if (mode == VCLOCK_PVCLOCK)
                return vread_pvclock();
@@ -169,17 +142,19 @@ notrace static inline u64 vgetcyc(int mode)
 notrace static int do_hres(clockid_t clk, struct timespec *ts)
 {
        struct vgtod_ts *base = &gtod->basetime[clk];
+       u64 cycles, last, ns;
        unsigned int seq;
-       u64 cycles, ns;
 
        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = base->sec;
                ns = base->nsec;
+               last = gtod->cycle_last;
                cycles = vgetcyc(gtod->vclock_mode);
                if (unlikely((s64)cycles < 0))
                        return vdso_fallback_gettime(clk, ts);
-               ns += (cycles - gtod->cycle_last) * gtod->mult;
+               if (cycles > last)
+                       ns += (cycles - last) * gtod->mult;
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));
 

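For readers who want to poke at the caller-side clamp outside the kernel,
the following small userspace sketch (hypothetical values; kernel types
swapped for stdint ones) shows that a counter value behind cycle_last
contributes a zero delta instead of a huge unsigned-underflow one:

	#include <stdint.h>
	#include <stdio.h>

	/* toy stand-ins for the gtod fields; the values are made up */
	static const uint64_t cycle_last = 1000;
	static const uint64_t mult = 3;
	static const uint64_t shift = 1;

	static uint64_t hres_ns(uint64_t base_ns, uint64_t cycles)
	{
		uint64_t ns = base_ns;

		/* same shape as the do_hres() hunk above */
		if (cycles > cycle_last)
			ns += (cycles - cycle_last) * mult;
		return ns >> shift;
	}

	int main(void)
	{
		/* counter ahead of cycle_last: normal forward path */
		printf("%llu\n", (unsigned long long)hres_ns(100, 1010));
		/* counter behind cycle_last: delta clamped to zero */
		printf("%llu\n", (unsigned long long)hres_ns(100, 990));
		return 0;
	}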