We've always masked off the top 32 bits of the syscall number when
x32 is enabled, but hopefully no-one relies on that.  Now that the
slow path is in C, it checks all 64 bits of the syscall number there,
regardless of whether x32 is enabled.  Let's make the fast path
consistent with it.
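
As an illustration, here is a minimal user-space sketch (not kernel
code) of the behavioural difference, assuming x32 is enabled so
__SYSCALL_MASK == ~__X32_SYSCALL_BIT; NR_MAX is a hypothetical
stand-in for __NR_syscall_max.  The old andl/cmpl pair truncated the
comparison to 32 bits, while andq sign-extends its 32-bit immediate,
so it clears only the x32 bit and leaves the upper 32 bits in place
for cmpq to check:

/* Sketch only, not kernel code.  Assumes x32 is enabled, so
 * __SYSCALL_MASK == ~__X32_SYSCALL_BIT; NR_MAX is a hypothetical
 * stand-in for __NR_syscall_max. */
#include <stdint.h>
#include <stdio.h>

#define X32_SYSCALL_BIT	0x40000000ULL
#define SYSCALL_MASK	(~X32_SYSCALL_BIT)
#define NR_MAX		547ULL	/* hypothetical __NR_syscall_max */

/* Old fast path: andl/cmpl operate on the low 32 bits only, so the
 * top 32 bits of the syscall number were silently discarded. */
static int old_fastpath_in_range(uint64_t nr)
{
	return (uint32_t)(nr & SYSCALL_MASK) <= NR_MAX;
}

/* New fast path (matching the C slow path): andq sign-extends the
 * 32-bit immediate, clearing only the x32 bit, and cmpq then
 * compares all 64 bits. */
static int new_fastpath_in_range(uint64_t nr)
{
	return (nr & SYSCALL_MASK) <= NR_MAX;
}

int main(void)
{
	/* High bits set, but the low 32 bits look like a valid number. */
	uint64_t nr = (1ULL << 32) | 1;

	printf("old: %d  new: %d\n",
	       old_fastpath_in_range(nr), new_fastpath_in_range(nr));
	/* Prints "old: 1  new: 0": the old check accepted this value. */
	return 0;
}

A syscall number with junk in the upper 32 bits, as above, used to be
accepted by the fast path but now gets -ENOSYS, matching the slow
path.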

Signed-off-by: Ben Hutchings <b...@decadent.org.uk>
Cc: Andy Lutomirski <l...@kernel.org>
---
 arch/x86/entry/entry_64.S | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 858b555e274b..17ba2ca9b24d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -190,12 +190,10 @@ entry_SYSCALL_64_fastpath:
         */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-#if __SYSCALL_MASK == ~0
-       cmpq    $__NR_syscall_max, %rax
-#else
-       andl    $__SYSCALL_MASK, %eax
-       cmpl    $__NR_syscall_max, %eax
+#if __SYSCALL_MASK != ~0
+       andq    $__SYSCALL_MASK, %rax
 #endif
+       cmpq    $__NR_syscall_max, %rax
        ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
        movq    %r10, %rcx
 
