The x86 stack is expected to be 16-byte aligned, so that instructions
like movaps, which take xmm operands, can use the stack directly.
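
As an aside (not part of this patch), a minimal sketch of the kind of
SSE spill code a compiler emits under that assumption; the aligned
moves fault with #GP, which typically surfaces as a segmentation
fault, if %rsp is not 16-byte aligned here:

    sub     $16, %rsp           /* carve out a 16-byte stack slot */
    movaps  %xmm0, (%rsp)       /* store needs (%rsp) 16-byte aligned */
    movaps  (%rsp), %xmm0       /* reload, same alignment requirement */
    add     $16, %rsp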

However, the 16-byte alignment is what's expected at startup time.
Every function entered later sees the stack misaligned by one pointer
size, because the call instruction pushes the return address onto the
call stack.
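
A minimal x86_64 sketch of that arithmetic (labels made up for
illustration): the stack is 16-byte aligned before the call, the call
pushes an 8-byte return address, so the callee starts with %rsp off by
one pointer size:

    caller:
            /* at the call site: %rsp % 16 == 0 */
            call    callee          /* pushes the 8-byte return address */
            ret

    callee:
            /* on entry: %rsp % 16 == 8, off by one pointer size */
            push    %rbp            /* usual prologue push realigns %rsp */
            mov     %rsp, %rbp
            pop     %rbp
            ret

The stack pointer stored by initjmp becomes the entry-time %rsp of the
jumped-to function, so it has to carry the same pointer-size offset;
hence the ASM_SZPTR subtraction below.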

Add the missing (mis)alignment. This fixes a segmentation fault observed
using initjmp on x86_64.

Signed-off-by: Ahmad Fatoum <a.fat...@pengutronix.de>
---
 arch/x86/lib/setjmp_32.S | 4 +++-
 arch/x86/lib/setjmp_64.S | 2 ++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/x86/lib/setjmp_32.S b/arch/x86/lib/setjmp_32.S
index 38dcb68c1b59..30db5f989af6 100644
--- a/arch/x86/lib/setjmp_32.S
+++ b/arch/x86/lib/setjmp_32.S
@@ -8,6 +8,7 @@
 #define _REGPARM
 
 #include <linux/linkage.h>
+#include <asm-generic/pointer.h>
 
 .text
 .align 8
@@ -53,7 +54,8 @@ ENDPROC(longjmp)
 ENTRY(initjmp)
 
        movl %edx, 20(%eax)     /* Return address */
-       movl %ecx, 4(%eax)      /* Post-return %esp! */
+       sub $ASM_SZPTR, %ecx    /* ESP - 4 has to be 16-byte aligned on entry */
+       movl %ecx, 4(%eax)      /* Stack top */
        xorl %eax, %eax         /* Return value */
        ret
 
diff --git a/arch/x86/lib/setjmp_64.S b/arch/x86/lib/setjmp_64.S
index 28ea576cd22e..d5cf99a1557f 100644
--- a/arch/x86/lib/setjmp_64.S
+++ b/arch/x86/lib/setjmp_64.S
@@ -6,6 +6,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm-generic/pointer.h>
 
 .text
 .align 8
@@ -53,6 +54,7 @@ ENDPROC(longjmp)
 ENTRY(initjmp)
 
        movq    %rsi, (%rdi)    /* Return address */
+       sub     $ASM_SZPTR, %rdx        /* RSP - 8 has to be 16-byte aligned on entry */
        movq    %rdx, 8(%rdi)   /* Stack top */
        xorq    %rax, %rax
        ret
-- 
2.39.2

