Commit-ID:  0d2eb73b29996684d5bbb72f85c74b47b4c359f7
Gitweb:     https://git.kernel.org/tip/0d2eb73b29996684d5bbb72f85c74b47b4c359f7
Author:     Joerg Roedel <jroe...@suse.de>
AuthorDate: Wed, 18 Jul 2018 11:40:43 +0200
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Fri, 20 Jul 2018 01:11:37 +0200

x86/entry/32: Split off return-to-kernel path

Use a separate return path when returning to the kernel.

This allows putting the PTI cr3-switch and the switch to the entry-stack
into the return-to-user path without further checking.

Signed-off-by: Joerg Roedel <jroe...@suse.de>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Pavel Machek <pa...@ucw.cz>
Cc: "H . Peter Anvin" <h...@zytor.com>
Cc: linux...@kvack.org
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Juergen Gross <jgr...@suse.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Jiri Kosina <jkos...@suse.cz>
Cc: Boris Ostrovsky <boris.ostrov...@oracle.com>
Cc: Brian Gerst <brge...@gmail.com>
Cc: David Laight <david.lai...@aculab.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Eduardo Valentin <edu...@amazon.com>
Cc: Greg KH <gre...@linuxfoundation.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: aligu...@amazon.com
Cc: daniel.gr...@iaik.tugraz.at
Cc: hu...@google.com
Cc: keesc...@google.com
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Waiman Long <ll...@redhat.com>
Cc: "David H . Gutteridge" <dhgutteri...@sympatico.ca>
Cc: j...@8bytes.org
Link:       https://lkml.kernel.org/r/1531906876-13451-7-git-send-email-j...@8bytes.org

---
 arch/x86/entry/entry_32.S | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 43641310b6e3..7251c4f3e99e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -65,7 +65,7 @@
 # define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
-# define resume_kernel         restore_all
+# define resume_kernel         restore_all_kernel
 #endif
 
 .macro TRACE_IRQS_IRET
@@ -399,9 +399,9 @@ ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
        cmpl    $0, PER_CPU_VAR(__preempt_count)
-       jnz     restore_all
+       jnz     restore_all_kernel
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
-       jz      restore_all
+       jz      restore_all_kernel
        call    preempt_schedule_irq
        jmp     .Lneed_resched
 END(resume_kernel)
@@ -606,6 +606,11 @@ restore_all:
         */
        INTERRUPT_RETURN
 
+restore_all_kernel:
+       TRACE_IRQS_IRET
+       RESTORE_REGS 4
+       jmp     .Lirq_return
+
 .section .fixup, "ax"
 ENTRY(iret_exc )
        pushl   $0                              # no error code
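
Below is a rough, illustrative sketch of how the two return paths are expected
to look once the rest of this series is applied. The restore_all_kernel body
matches the hunk above; the SWITCH_TO_ENTRY_STACK and SWITCH_TO_USER_CR3 lines
in the user path are only added by later patches in the series, so their exact
placement here is an assumption, not a quote of the final code:

restore_all:
	TRACE_IRQS_IRET
	SWITCH_TO_ENTRY_STACK			# leave task stack, use entry stack (later patch)
	SWITCH_TO_USER_CR3 scratch_reg=%eax	# PTI: switch to user page tables (later patch)
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN			# iret back to user mode

restore_all_kernel:				# new label added by this patch
	TRACE_IRQS_IRET
	RESTORE_REGS 4				# skip orig_eax/error_code
	jmp	.Lirq_return			# kernel return needs no cr3/stack switch

Splitting the label keeps the entry-stack and cr3 handling out of the
return-to-kernel path entirely, so the user path can do the switches
unconditionally instead of checking the saved CS on every return.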
