native_load_sp0 relies on this.  I'm not sure why we haven't seen
reports of crashes.  Maybe no one tests new kernels on non-SEP CPUs.

Signed-off-by: Andy Lutomirski <l...@kernel.org>
---

Of course, I haven't tested on a non-SEP CPU either.  Where would I
find one?
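
For reference, the 32-bit path in native_load_sp0 that relies on ss1 looks
roughly like the sketch below.  It is paraphrased from memory of
arch/x86/include/asm/processor.h around this kernel version, not a verbatim
quote, so treat the exact body and the wrmsr call as assumptions:

    static inline void native_load_sp0(struct tss_struct *tss,
                                       struct thread_struct *thread)
    {
            tss->x86_tss.sp0 = thread->sp0;
    #ifdef CONFIG_X86_32
            /*
             * ss1 caches the last value written to MSR_IA32_SYSENTER_CS.
             * If ss1 was never initialized, this comparison is against
             * garbage, and a spurious mismatch leads to an unconditional
             * wrmsr of a SYSENTER MSR that may not exist on a non-SEP CPU.
             */
            if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                    tss->x86_tss.ss1 = thread->sysenter_cs;
                    wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
            }
    #endif
    }

With ss1 initialized to __KERNEL_CS even on non-SEP CPUs, that comparison is
against a defined value, so the wrmsr path should not be reached by accident
on hardware where the MSR may be absent.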

 arch/x86/kernel/cpu/common.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d27ab6e5e671..2181692c1eb3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -993,6 +993,17 @@ void enable_sep_cpu(void)
        cpu = get_cpu();
        tss = &per_cpu(cpu_tss, cpu);
 
+#ifdef CONFIG_X86_32
+       /*
+        * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1
+        * field -- see the big comment in struct x86_hw_tss's
+        * definition.  We need to initialize this even on non-SEP
+        * CPUs so that comparing ss1 to thread->sysenter_cs is
+        * always valid.
+        */
+       tss->x86_tss.ss1 = __KERNEL_CS;
+#endif
+
        /*
         * On 64-bit CPUs, enable SEP unconditionally.  On Intel CPUs,
         * it works and we use it.  On AMD CPUs, the MSRs exist but EIP
@@ -1005,14 +1016,6 @@ void enable_sep_cpu(void)
        if (IS_ENABLED(CONFIG_X86_32) && !boot_cpu_has(X86_FEATURE_SEP))
                goto out;
 
-#ifdef CONFIG_X86_32
-       /*
-        * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
-        * see the big comment in struct x86_hw_tss's definition.
-        */
-       tss->x86_tss.ss1 = __KERNEL_CS;
-#endif
-
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, __KERNEL_CS);
        wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
                    (unsigned long)tss +
-- 
2.4.3
