On Sat, Nov 19, 2016 at 03:37:30PM -0800, Andy Lutomirski wrote:
> Linux will have all kinds of sporadic problems on systems that don't
> have the CPUID instruction unless CONFIG_M486=y.  In particular,
> sync_core() will explode.

Btw, I think we should do something like this, in addition:

---
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 50425dd7e113..ee9de769941f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -105,6 +105,7 @@
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_CPUID      ( 3*32+31) /* CPU has CPUID instruction itself */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1f6a92903b09..63aa4842c0ae 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -602,34 +602,21 @@ static __always_inline void cpu_relax(void)
        rep_nop();
 }
 
-/* Stop speculative execution and prefetching of modified code. */
+/*
+ * Stop speculative execution and prefetching of modified code. CPUID is a
+ * barrier to speculative execution. Prefetched instructions are automatically
+ * invalidated when modified.
+ */
 static inline void sync_core(void)
 {
        int tmp;
 
-#ifdef CONFIG_M486
-       /*
-        * Do a CPUID if available, otherwise do a jump.  The jump
-        * can conveniently enough be the jump around CPUID.
-        */
-       asm volatile("cmpl %2,%1\n\t"
-                    "jl 1f\n\t"
-                    "cpuid\n"
-                    "1:"
-                    : "=a" (tmp)
-                    : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
-                    : "ebx", "ecx", "edx", "memory");
-#else
-       /*
-        * CPUID is a barrier to speculative execution.
-        * Prefetched instructions are automatically
-        * invalidated when modified.
-        */
-       asm volatile("cpuid"
-                    : "=a" (tmp)
-                    : "0" (1)
-                    : "ebx", "ecx", "edx", "memory");
-#endif
+       /* Do a CPUID if available, otherwise do a forward jump. */
+       alternative_io("jmp 1f\n\t1:", "cpuid",
+                       X86_FEATURE_CPUID,
+                       "=a" (tmp),
+                       "0" (1)
+                       : "ebx", "ecx", "edx", "memory");
 }
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 90c007447507..8dcdcdeec569 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -800,14 +800,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;
 
-       if (!have_cpuid_p())
-               identify_cpu_without_cpuid(c);
-
        /* cyrix could have cpuid enabled via c_identify()*/
        if (have_cpuid_p()) {
                cpu_detect(c);
                get_cpu_vendor(c);
                get_cpu_cap(c);
+               setup_force_cpu_cap(X86_FEATURE_CPUID);
 
                if (this_cpu->c_early_init)
                        this_cpu->c_early_init(c);
@@ -817,6 +815,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
                if (this_cpu->c_bsp_init)
                        this_cpu->c_bsp_init(c);
+       } else {
+               identify_cpu_without_cpuid(c);
+               setup_clear_cpu_cap(X86_FEATURE_CPUID);
        }
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
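
Just for completeness: whether X86_FEATURE_CPUID gets forced or cleared
above depends on have_cpuid_p(), which on 32-bit boils down to
flag_is_changeable_p(X86_EFLAGS_ID), i.e. checking whether the ID bit
(bit 21) in EFLAGS can be toggled; on 64-bit CPUID is architecturally
guaranteed, so have_cpuid_p() just returns 1 there. A rough,
user-space-only sketch of that check (build with -m32; the helper name
cpu_has_cpuid() is made up for illustration, it is not a kernel
interface):

#include <stdio.h>

/*
 * Same trick flag_is_changeable_p() uses: try to flip the ID bit in
 * EFLAGS and see whether the change sticks. If it does, CPUID exists.
 */
static int cpu_has_cpuid(void)
{
	unsigned long f1, f2;
	const unsigned long id_bit = 1UL << 21;	/* X86_EFLAGS_ID */

	asm volatile("pushfl\n\t"		/* save EFLAGS		*/
		     "pushfl\n\t"		/* copy to work on	*/
		     "popl %0\n\t"
		     "movl %0, %1\n\t"
		     "xorl %2, %0\n\t"		/* flip the ID bit	*/
		     "pushl %0\n\t"
		     "popfl\n\t"		/* write it back	*/
		     "pushfl\n\t"
		     "popl %0\n\t"		/* read it again	*/
		     "popfl"			/* restore EFLAGS	*/
		     : "=&r" (f1), "=&r" (f2)
		     : "ir" (id_bit));

	return !!((f1 ^ f2) & id_bit);		/* bit stuck -> no CPUID */
}

int main(void)
{
	printf("CPUID %savailable\n", cpu_has_cpuid() ? "" : "not ");
	return 0;
}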

-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.
