From: Joerg Roedel <jroe...@suse.de>

Add unconditional cr3 switches between user and kernel cr3
to all non-NMI entry and exit points.

Signed-off-by: Joerg Roedel <jroe...@suse.de>
---
 arch/x86/entry/entry_32.S | 83 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 79 insertions(+), 4 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 9e06431..311aefa 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -154,6 +154,33 @@
 
 #endif /* CONFIG_X86_32_LAZY_GS */
 
+/* Unconditionally switch to user cr3 */
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+       movl    %cr3, \scratch_reg
+       orl     $PTI_SWITCH_MASK, \scratch_reg
+       movl    \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+/*
+ * Switch to kernel cr3 if not already loaded and return the cr3 that
+ * was live on entry in \scratch_reg
+ */
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       movl    %cr3, \scratch_reg
+       /* Test if we are already on kernel CR3 */
+       testl   $PTI_SWITCH_MASK, \scratch_reg
+       jz      .Lend_\@
+       andl    $(~PTI_SWITCH_MASK), \scratch_reg
+       movl    \scratch_reg, %cr3
+       /* Return original cr3 in \scratch_reg */
+       orl     $PTI_SWITCH_MASK, \scratch_reg
+.Lend_\@:
+.endm
+
 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
        cld
        /* Push segment registers and %eax */
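
The logic of the two macros can be modelled in plain C. This is an
illustrative user-space sketch only: a uint32_t stands in for %cr3,
the helper names are made up for the model, and the PTI_SWITCH_MASK
value is a placeholder (the real constant is defined elsewhere in
this series).

#include <stdint.h>

#define PTI_SWITCH_MASK	0x1000u		/* placeholder, not the real value */

/* SWITCH_TO_USER_CR3: unconditionally set the user bit */
static uint32_t switch_to_user_cr3(uint32_t cr3)
{
	return cr3 | PTI_SWITCH_MASK;
}

/*
 * SWITCH_TO_KERNEL_CR3: clear the user bit if it is set and hand
 * the cr3 that was live on entry back to the caller, just as the
 * macro leaves it in \scratch_reg.
 */
static uint32_t switch_to_kernel_cr3(uint32_t *cr3)
{
	uint32_t entry_cr3 = *cr3;

	if (entry_cr3 & PTI_SWITCH_MASK)
		*cr3 = entry_cr3 & ~PTI_SWITCH_MASK;

	return entry_cr3;
}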
@@ -288,7 +315,6 @@
 #endif /* CONFIG_X86_ESPFIX32 */
 .endm
 
-
 /*
  * Called with pt_regs fully populated and kernel segments loaded,
  * so we can access PER_CPU and use the integer registers.
@@ -301,11 +327,19 @@
  */
 
 #define CS_FROM_ENTRY_STACK    (1 << 31)
+#define CS_FROM_USER_CR3       (1 << 30)
 
 .macro SWITCH_TO_KERNEL_STACK
 
        ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
 
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+
+       /*
+        * %eax now contains the entry cr3, and we carry it forward in
+        * that register for the duration of this macro.
+        */
+
        /* Are we on the entry stack? Bail out if not! */
        movl    PER_CPU_VAR(cpu_entry_area), %edi
        addl    $CPU_ENTRY_AREA_entry_stack, %edi
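
The CS_FROM_* markers work because the CS selector only occupies the
low 16 bits of its 32-bit pt_regs slot, leaving the top bits free to
record entry state. A C model of the encoding, with the same caveats
as the sketch above (helper names are made up for the model):

#include <stdint.h>
#include <stdbool.h>

#define CS_FROM_ENTRY_STACK	(1u << 31)
#define CS_FROM_USER_CR3	(1u << 30)

/* Entry path: record that the kernel was entered on a user cr3 */
static void mark_from_user_cr3(uint32_t *saved_cs)
{
	*saved_cs |= CS_FROM_USER_CR3;
}

/*
 * Exit path: test and clear the marker; the slot must be clean
 * again before iret consumes it as a real CS value.
 */
static bool test_and_clear_from_user_cr3(uint32_t *saved_cs)
{
	bool from_user = *saved_cs & CS_FROM_USER_CR3;

	*saved_cs &= ~CS_FROM_USER_CR3;
	return from_user;
}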
@@ -374,7 +408,8 @@
         * but switch back to the entry-stack again when we approach
         * iret and return to the interrupted code-path. This usually
         * happens when we hit an exception while restoring user-space
-        * segment registers on the way back to user-space.
+        * segment registers on the way back to user-space or when the
+        * sysenter handler runs with eflags.tf set.
         *
         * When we switch to the task-stack here, we can't trust the
         * contents of the entry-stack anymore, as the exception handler
@@ -391,6 +426,7 @@
         *
         * %esi: Entry-Stack pointer (same as %esp)
         * %edi: Top of the task stack
+        * %eax: CR3 on kernel entry
         */
 
        /* Calculate number of bytes on the entry stack in %ecx */
@@ -407,6 +443,14 @@
        orl     $CS_FROM_ENTRY_STACK, PT_CS(%esp)
 
        /*
+        * Test the cr3 used to enter the kernel and add a marker
+        * so that we can switch back to it before iret.
+        */
+       testl   $PTI_SWITCH_MASK, %eax
+       jz      .Lcopy_pt_regs_\@
+       orl     $CS_FROM_USER_CR3, PT_CS(%esp)
+
+       /*
         * %esi and %edi are unchanged, %ecx contains the number of
         * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
         * the stack-frame on task-stack and copy everything over
@@ -472,7 +516,7 @@
 
 /*
  * This macro handles the case when we return to kernel-mode on the iret
- * path and have to switch back to the entry stack.
+ * path and have to switch back to the entry stack and/or the user cr3.
  *
  * See the comments below the .Lentry_from_kernel_\@ label in the
  * SWITCH_TO_KERNEL_STACK macro for more details.
@@ -518,6 +562,18 @@
        /* Safe to switch to entry-stack now */
        movl    %ebx, %esp
 
+       /*
+        * We came from the entry-stack and need to check whether we
+        * also have to switch back to the user cr3.
+        */
+       testl   $CS_FROM_USER_CR3, PT_CS(%esp)
+       jz      .Lend_\@
+
+       /* Clear marker from stack-frame */
+       andl    $(~CS_FROM_USER_CR3), PT_CS(%esp)
+
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+
 .Lend_\@:
 .endm
 /*
@@ -711,6 +767,18 @@ ENTRY(xen_sysenter_target)
  * 0(%ebp) arg6
  */
 ENTRY(entry_SYSENTER_32)
+       /*
+        * We are on the entry-stack with all user-space registers still
+        * live. Preserve eflags and %eax around the cr3 switch: the
+        * switch clobbers the flags and needs %eax as a scratch register.
+        */
+       pushfl
+       pushl   %eax
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
+       popl    %eax
+       popfl
+
+       /* Stack empty again, switch to task stack */
        movl    TSS_entry_stack(%esp), %esp
 
 .Lsysenter_past_esp:
@@ -791,6 +859,9 @@ ENTRY(entry_SYSENTER_32)
        /* Switch to entry stack */
        movl    %eax, %esp
 
+       /* Now ready to switch the cr3 */
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+
        /*
         * Restore all flags except IF. (We restore IF separately because
         * STI gives a one-instruction window in which we won't be interrupted,
@@ -871,7 +942,11 @@ restore_all:
 .Lrestore_all_notrace:
        CHECK_AND_APPLY_ESPFIX
 .Lrestore_nocheck:
-       RESTORE_REGS 4                          # skip orig_eax/error_code
+       /* Switch back to user cr3 */
+       SWITCH_TO_USER_CR3 scratch_reg=%eax
+
+       /* Restore user state */
+       RESTORE_REGS pop=4                      # skip orig_eax/error_code
 .Lirq_return:
        /*
         * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
-- 
2.7.4
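
Compiled together with the two sketches above, a small user-space
round-trip check of the whole scheme (again purely illustrative, not
kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	uint32_t cr3 = 0x12340000u | PTI_SWITCH_MASK;	/* user cr3 */
	uint32_t saved_cs = 0x73;	/* 32-bit user CS selector */
	uint32_t entry_cr3;

	/* Kernel entry: switch cr3, remember where we came from */
	entry_cr3 = switch_to_kernel_cr3(&cr3);
	if (entry_cr3 & PTI_SWITCH_MASK)
		mark_from_user_cr3(&saved_cs);
	assert(!(cr3 & PTI_SWITCH_MASK));	/* now on kernel cr3 */

	/* iret path: clear the marker, switch back only if needed */
	if (test_and_clear_from_user_cr3(&saved_cs))
		cr3 = switch_to_user_cr3(cr3);

	assert(cr3 == entry_cr3);	/* entry cr3 restored */
	assert(saved_cs == 0x73);	/* CS slot clean again */
	printf("cr3 round-trip OK: %#x\n", (unsigned int)cr3);
	return 0;
}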
