After TEST instructions, use the logically matching JZ/JNZ mnemonics instead of JE/JNE (this does not change the generated code, since they are aliases for the same opcodes).
Tidy up CMPW insns: Modern CPUs are not good with 16-bit operations. The instructions with 16-bit immediates are especially bad: on many CPUs they cause a length-changing prefix stall in the decoders, costing ~6 cycles to recover. Replace CMPWs with CMPLs. Of these, the form with 8-bit sign-extended immediates is a win because it is smaller now (no 0x66 prefix anymore); the forms with 16-bit immediates are faster. Signed-off-by: Denys Vlasenko <dvlas...@redhat.com> CC: Linus Torvalds <torva...@linux-foundation.org> CC: Steven Rostedt <rost...@goodmis.org> CC: Ingo Molnar <mi...@kernel.org> CC: Borislav Petkov <b...@alien8.de> CC: "H. Peter Anvin" <h...@zytor.com> CC: Andy Lutomirski <l...@amacapital.net> CC: Oleg Nesterov <o...@redhat.com> CC: Frederic Weisbecker <fweis...@gmail.com> CC: Alexei Starovoitov <a...@plumgrid.com> CC: Will Drewry <w...@chromium.org> CC: Kees Cook <keesc...@chromium.org> CC: x...@kernel.org CC: linux-kernel@vger.kernel.org --- arch/x86/kernel/entry_32.S | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 4c8cc34..9a31d5e 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -432,7 +432,7 @@ sysenter_after_call: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testl $_TIF_ALLWORK_MASK, %ecx - jne sysexit_audit + jnz sysexit_audit sysenter_exit: /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx @@ -460,7 +460,7 @@ sysenter_audit: sysexit_audit: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jne syscall_exit_work + jnz syscall_exit_work TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_ANY) movl %eax,%edx /* second arg, syscall return value */ @@ -472,7 +472,7 @@ sysexit_audit: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx - jne syscall_exit_work + jnz syscall_exit_work movl PT_EAX(%esp),%eax /* reload syscall return value */ jmp sysenter_exit 
#endif @@ -510,7 +510,7 @@ syscall_exit: TRACE_IRQS_OFF movl TI_flags(%ebp), %ecx testl $_TIF_ALLWORK_MASK, %ecx # current->work - jne syscall_exit_work + jnz syscall_exit_work restore_all: TRACE_IRQS_IRET @@ -612,7 +612,7 @@ work_notifysig: # deal with pending signals and #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) movl %esp, %eax - jne work_notifysig_v86 # returning to kernel-space or + jnz work_notifysig_v86 # returning to kernel-space or # vm86-space 1: #else @@ -708,7 +708,7 @@ END(sysenter_badsys) #ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ - cmpw $__ESPFIX_SS, %ax + cmpl $__ESPFIX_SS, %eax jne 27f movl $__KERNEL_DS, %eax movl %eax, %ds @@ -1275,7 +1275,7 @@ END(page_fault) * the instruction that would have done it for sysenter. */ .macro FIX_STACK offset ok label - cmpw $__KERNEL_CS, 4(%esp) + cmpl $__KERNEL_CS, 4(%esp) jne \ok \label: movl TSS_sysenter_sp0 + \offset(%esp), %esp @@ -1318,7 +1318,7 @@ ENTRY(nmi) #ifdef CONFIG_X86_ESPFIX32 pushl_cfi %eax movl %ss, %eax - cmpw $__ESPFIX_SS, %ax + cmpl $__ESPFIX_SS, %eax popl_cfi %eax je nmi_espfix_stack #endif @@ -1352,7 +1352,7 @@ nmi_stack_fixup: nmi_debug_stack_check: /* We have a RING0_INT_FRAME here */ - cmpw $__KERNEL_CS,16(%esp) + cmpl $__KERNEL_CS,16(%esp) jne nmi_stack_correct cmpl $debug,(%esp) jb nmi_stack_correct -- 1.8.1.4 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/