Commit-ID:  2140a9942b84dd4bf559dd1215b8f43c36ece5b5
Gitweb:     http://git.kernel.org/tip/2140a9942b84dd4bf559dd1215b8f43c36ece5b5
Author:     Jan Beulich <jbeul...@suse.com>
AuthorDate: Fri, 3 Feb 2017 02:03:25 -0700
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Wed, 1 Mar 2017 10:16:35 +0100

x86/entry/64: Relax pvops stub clobber specifications

Except for the error_exit case, none of the code paths following the
{DIS,EN}ABLE_INTERRUPTS() invocations being modified here make any
assumptions on register values, so all registers can be clobbered
there. In the error_exit case a minor adjustment to register usage
(at once eliminating an instruction) also allows for this to be true.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/5894556d0200007800136...@prv-mh.provo.novell.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/entry/entry_64.S | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 044d18e..d2b2a2948 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
         * If we see that no exit work is required (which we are required
         * to check with IRQs off), then we can go straight to SYSRET64.
         */
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movq    PER_CPU_VAR(current_task), %r11
        testl   $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
         * raise(3) will trigger this, for example.  IRQs are off.
         */
        TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
+       ENABLE_INTERRUPTS(CLBR_ANY)
        SAVE_EXTRA_REGS
        movq    %rsp, %rdi
        call    syscall_return_slowpath /* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
         * Called from fast path -- disable IRQs again, pop return address
         * and jump to slow path
         */
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        popq    %rax
        jmp     entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
        interrupt do_IRQ
        /* 0(%rsp): old RSP */
 ret_from_intr:
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        decl    PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF_DEBUG
        testl   %ebx, %ebx                      /* swapgs needed? */
        jnz     paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-       movl    %ebx, %eax
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
-       testl   %eax, %eax
+       testl   %ebx, %ebx
        jnz     retint_kernel
        jmp     retint_user
 END(error_exit)

Reply via email to