x86: make SMEP/SMAP suppression tolerate NMI/MCE at the "wrong" time

There is one instruction boundary where any kind of interruption would
break the assumptions cr4_pv32_restore's debug mode checking makes
about the correlation between the CR4 register value and its in-memory
cache. Correct this (see the code comment) even in non-debug mode, or
else a subsequent cr4_pv32_restore would also be misled into thinking
the features are enabled when they really aren't.
Signed-off-by: Jan Beulich
---
This will only apply on top of "x86: refine debugging of SMEP/SMAP
fix", despite being functionally independent.
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -183,8 +183,21 @@ ENTRY(compat_restore_all_guest)
         jpe   .Lcr4_alt_end
         mov   CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp), %rax
         and   $~XEN_CR4_PV32_BITS, %rax
+1:
         mov   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
         mov   %rax, %cr4
+        /*
+         * An NMI or MCE may have occurred between the previous two
+         * instructions, leaving register and cache in a state where
+         * the next exit from the guest would trigger the BUG in
+         * cr4_pv32_restore. If this happened, the cached value is no
+         * longer what we just set it to, which we can use to correct
+         * that state. Note that we do not have to fear this loop
+         * causing a live lock: if NMIs/MCEs occurred at that high a
+         * rate, we would be live locked anyway.
+         */
+        cmp   %rax, CPUINFO_cr4-CPUINFO_guest_cpu_user_regs(%rsp)
+        jne   1b
 .Lcr4_alt_end:
 .section .altinstructions, "a"
         altinstruction_entry .Lcr4_orig, .Lcr4_orig, X86_FEATURE_ALWAYS, 12, 0
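
For context, the BUG named in the comment is the debug-build
consistency check introduced by the prerequisite patch "x86: refine
debugging of SMEP/SMAP fix". That check is assembly in
cr4_pv32_restore; what follows is only a hypothetical C rendering of
the invariant it enforces, reusing the illustrative names from the
sketch above.

#include <assert.h>
#include <stdint.h>

#define PV32_BITS 0x00300000u  /* stand-in for XEN_CR4_PV32_BITS */

static uint64_t hw_cr4;      /* simulated CR4 register */
static uint64_t cached_cr4;  /* simulated in-memory cache */

/* On exit to the guest, debug builds insist that register and cache
 * agree on the PV32 bits; the retry loop added above is what keeps an
 * ill-timed NMI/MCE from violating this. */
static void debug_check_cr4(void)
{
    assert((hw_cr4 & PV32_BITS) == (cached_cr4 & PV32_BITS));
}

int main(void)
{
    debug_check_cr4();  /* both zero here, so the check passes */
    return 0;
}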