Replace a number of instances in the x86_64 entry code where the
absolute address of a symbol is taken in a manner that is not supported
when linking in PIE mode, and use RIP-relative references instead, which
don't require boot-time fixups at all.

This requires GET_PERCPU_BASE to take the address of __per_cpu_offset
via a RIP-relative LEA into a temporary register, so the macro gains a
scratch operand, and SAVE_AND_SET_GSBASE is reordered to invoke it
before RDGSBASE populates the save register that is borrowed as that
scratch.
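
A minimal sketch of the difference (not part of the patch; some_symbol
is a placeholder, not a symbol touched here):

	/* Absolute reference: encodes the link-time address of the
	 * symbol as an immediate, which relies on a relocation that a
	 * PIE link cannot express and otherwise needs a boot-time
	 * fixup under KASLR.
	 */
	movq	$some_symbol, %rdx

	/* RIP-relative reference: encodes only the distance between
	 * this instruction and the symbol, so it is correct wherever
	 * the image ends up being loaded.
	 */
	leaq	some_symbol(%rip), %rdx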

Signed-off-by: Ard Biesheuvel <[email protected]>
---
 arch/x86/entry/calling.h  |  9 +++++----
 arch/x86/entry/entry_64.S | 14 +++++++-------
 2 files changed, 12 insertions(+), 11 deletions(-)
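
An aside on the last hunk in entry_64.S (an illustration for reviewers,
not part of the patch): pushq $1f takes the absolute address of a local
label, so the fake IRET frame is now completed by the CALL itself. The
CALL pushes its return address, which fills the RIP slot of the frame,
and jumps straight to the IRETQ at native_irq_return_iret; the IRETQ
consumes the frame and resumes right after the CALL, just as the old
code resumed at the 1: label:

	pushfq				/* RFLAGS */
	pushq	$__KERNEL_CS		/* CS */
	call	native_irq_return_iret	/* pushes the RIP slot and
					 * executes IRETQ, which
					 * returns right here */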

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 77e2d920a640..a37b402432a3 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -376,8 +376,8 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
+       GET_PERCPU_BASE \scratch_reg \save_reg
        rdgsbase \save_reg
-       GET_PERCPU_BASE \scratch_reg
        wrgsbase \scratch_reg
 .endm
 
@@ -413,15 +413,16 @@ For 32-bit we have the following conventions - kernel is built with
  * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
  * while running KVM's run loop.
  */
-.macro GET_PERCPU_BASE reg:req
+.macro GET_PERCPU_BASE reg:req scratch:req
        LOAD_CPU_AND_NODE_SEG_LIMIT \reg
        andq    $VDSO_CPUNODE_MASK, \reg
-       movq    __per_cpu_offset(, \reg, 8), \reg
+       leaq    __per_cpu_offset(%rip), \scratch
+       movq    (\scratch, \reg, 8), \reg
 .endm
 
 #else
 
-.macro GET_PERCPU_BASE reg:req
+.macro GET_PERCPU_BASE reg:req scratch:req
        movq    pcpu_unit_offsets(%rip), \reg
 .endm
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f9983a1907bf..77584f5ebb4b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1040,7 +1040,8 @@ SYM_CODE_START(error_entry)
        movl    %ecx, %eax                      /* zero extend */
        cmpq    %rax, RIP+8(%rsp)
        je      .Lbstep_iret
-       cmpq    $.Lgs_change, RIP+8(%rsp)
+       leaq    .Lgs_change(%rip), %rcx
+       cmpq    %rcx, RIP+8(%rsp)
        jne     .Lerror_entry_done_lfence
 
        /*
@@ -1252,10 +1253,10 @@ SYM_CODE_START(asm_exc_nmi)
         * the outer NMI.
         */
 
-       movq    $repeat_nmi, %rdx
+       leaq    repeat_nmi(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      1f
-       movq    $end_repeat_nmi, %rdx
+       leaq    end_repeat_nmi(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      nested_nmi_out
 1:
@@ -1309,7 +1310,8 @@ nested_nmi:
        pushq   %rdx
        pushfq
        pushq   $__KERNEL_CS
-       pushq   $repeat_nmi
+       leaq    repeat_nmi(%rip), %rdx
+       pushq   %rdx
 
        /* Put stack back */
        addq    $(6*8), %rsp
@@ -1348,10 +1350,8 @@ first_nmi:
        addq    $8, (%rsp)      /* Fix up RSP */
        pushfq                  /* RFLAGS */
        pushq   $__KERNEL_CS    /* CS */
-       pushq   $1f             /* RIP */
-       iretq                   /* continues at repeat_nmi below */
+       call    native_irq_return_iret
        UNWIND_HINT_IRET_REGS
-1:
 #endif
 
 repeat_nmi:
-- 
2.47.3
