Commit-ID:  b20112edeadf0b8a1416de061caa4beb11539902
Gitweb:     http://git.kernel.org/tip/b20112edeadf0b8a1416de061caa4beb11539902
Author:     Adrian Hunter <adrian.hun...@intel.com>
AuthorDate: Fri, 21 Aug 2015 12:05:18 +0300
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Sun, 13 Sep 2015 11:27:20 +0200
perf/x86: Improve accuracy of perf/sched clock

When the TSC is stable, the perf/sched clock is based on it. However,
the conversion from cycles to nanoseconds is not as accurate as it
could be. Because CYC2NS_SCALE_FACTOR is 10, the accuracy is +/- 1/2048.

The change is to calculate the maximum shift that results in a
multiplier that is still a 32-bit number. For example, all frequencies
over 1 GHz will have a shift of 32, making the accuracy of the
conversion +/- 1/(2^33). That is achieved by using the
clocks_calc_mult_shift() function.

Signed-off-by: Adrian Hunter <adrian.hun...@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Arnaldo Carvalho de Melo <a...@kernel.org>
Cc: Arnaldo Carvalho de Melo <a...@redhat.com>
Cc: Jiri Olsa <jo...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Stephane Eranian <eran...@google.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Vince Weaver <vincent.wea...@maine.edu>
Link: http://lkml.kernel.org/r/1440147918-22250-1-git-send-email-adrian.hun...@intel.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/kernel/tsc.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c8d52cb..39ec3d0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -167,21 +167,20 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
  *              ns = cycles * cyc2ns_scale / SC
  *
  *      And since SC is a constant power of two, we can convert the div
- *  into a shift.
+ *  into a shift. The larger SC is, the more accurate the conversion, but
+ *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
+ *  (64-bit result) can be used.
  *
- *  We can use khz divisor instead of mhz to keep a better precision, since
- *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  We can use khz divisor instead of mhz to keep a better precision.
  *  (mathieu.desnoy...@polymtl.ca)
  *
  *                      -johns...@us.ibm.com "math is hard, lets go shopping!"
  */
 
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
 static void cyc2ns_data_init(struct cyc2ns_data *data)
 {
 	data->cyc2ns_mul = 0;
-	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+	data->cyc2ns_shift = 0;
 	data->cyc2ns_offset = 0;
 	data->__count = 0;
 }
@@ -215,14 +214,14 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 
 	if (likely(data == tail)) {
 		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 	} else {
 		data->__count++;
 
 		barrier();
 
 		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 
 		barrier();
 
@@ -256,12 +255,11 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	 * time function is continuous; see the comment near struct
 	 * cyc2ns_data.
 	 */
-	data->cyc2ns_mul =
-		DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
-				  cpu_khz);
-	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+			       NSEC_PER_MSEC, 0);
+
 	data->cyc2ns_offset = ns_now -
-		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
 
 	cyc2ns_write_end(cpu, data);
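For illustration, below is a minimal user-space sketch of the mult/shift
search that clocks_calc_mult_shift() performs: find the largest shift for
which the rounded multiplier (to << shift) / from still fits in the
permitted width. The helper name calc_mult_shift and the 2.4 GHz figure
are hypothetical, chosen only to mirror the set_cyc2ns_scale() call in
the patch (from = cpu_khz, to = NSEC_PER_MSEC = 10^6, maxsec = 0):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the kernel's clocks_calc_mult_shift() search: pick the
 * largest shift whose rounded multiplier fits in 32 bits (fewer bits
 * if a non-zero conversion range 'maxsec' is requested).
 */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                            uint32_t from, uint32_t to, uint32_t maxsec)
{
        uint64_t tmp;
        uint32_t sft, sftacc = 32;

        /* Shrink the usable multiplier width to cover 'maxsec' seconds. */
        tmp = ((uint64_t)maxsec * from) >> 32;
        while (tmp) {
                tmp >>= 1;
                sftacc--;
        }

        /* Largest shift whose rounded multiplier fits in 'sftacc' bits. */
        for (sft = 32; sft > 0; sft--) {
                tmp = (uint64_t)to << sft;
                tmp += from / 2;        /* round to nearest */
                tmp /= from;
                if ((tmp >> sftacc) == 0)
                        break;
        }
        *mult = (uint32_t)tmp;
        *shift = sft;
}

int main(void)
{
        uint32_t mult, shift;
        uint32_t cpu_khz = 2400000;     /* hypothetical 2.4 GHz TSC */

        calc_mult_shift(&mult, &shift, cpu_khz, 1000000, 0);
        printf("mult=%u shift=%u\n", mult, shift);

        /* ns = cycles * mult >> shift; convert one second of cycles. */
        uint64_t cycles = (uint64_t)cpu_khz * 1000;
        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)cycles,
               (unsigned long long)((cycles * mult) >> shift));
        return 0;
}

Run as-is this prints mult=1789569707 shift=32, and one second of cycles
converts to 1000000000 ns. The rounding error in mult is at most half a
unit, which for frequencies just over 1 GHz (mult close to 2^32) gives
the +/- 1/(2^33) accuracy quoted in the changelog. Note the plain 64-bit
multiply in the sketch only avoids overflow for small cycle counts; the
kernel's mul_u64_u32_shr() computes the full 64x32 -> 96-bit product, so
cycles_2_ns() works for arbitrary 64-bit TSC values.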