This code is timing 1 million indirect calls (K6_BUG_LOOP), so the added
overhead of counting the number of cycles elapsed as a 64-bit number
should be insignificant.  Drop the optimization of using a 32-bit count.

Signed-off-by: Andy Lutomirski <l...@kernel.org>
---
 arch/x86/kernel/cpu/amd.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5bd3a99dc20b..c5ceec532799 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -107,7 +107,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
-               unsigned long d, d2;
+               u64 d, d2;
 
                printk(KERN_INFO "AMD K6 stepping B detected - ");
 
@@ -118,10 +118,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
                n = K6_BUG_LOOP;
                f_vide = vide;
-               rdtscl(d);
+               d = native_read_tsc();
                while (n--)
                        f_vide();
-               rdtscl(d2);
+               d2 = native_read_tsc();
                d = d2-d;
 
                if (d > 20*K6_BUG_LOOP)
-- 
2.4.2

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to