Replace the absolute references to __per_cpu_offset and steal_time in
the __kvm_vcpu_is_preempted() inline asm with RIP-relative ones, to
avoid the need for relocation fixups at boot time. This is a
prerequisite for PIE linking, which only permits absolute references
that are 64 bits wide and visible to the loader.
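
As a rough sketch of the transformation (illustration only, following
the pattern used in the hunk below), an absolute per-CPU access such as

    movq    __per_cpu_offset(,%rdi,8), %rax

becomes position independent by anchoring everything on a local label:

    0:  leaq    0b(%rip), %rax
        addq    __per_cpu_offset - 0b(%rax,%rdi,8), %rax

%rax holds the runtime address of 0b, so the 0b bias cancels against
the "- 0b" in the displacement and the load still indexes
__per_cpu_offset by the CPU number in %rdi; the residual 0b bias left
in %rax is cancelled the same way by the "-0b" in the subsequent
steal_time reference.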

Signed-off-by: Ard Biesheuvel <[email protected]>
---
 arch/x86/kernel/kvm.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index df78ddee0abb..1a0335f328e1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -807,8 +807,9 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
  * restoring to/from the stack.
  */
 #define PV_VCPU_PREEMPTED_ASM                                               \
- "movq   __per_cpu_offset(,%rdi,8), %rax\n\t"                               \
- "cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
+ "0:leaq 0b(%rip), %rax\n\t"                                                \
+ "addq   __per_cpu_offset - 0b(%rax,%rdi,8), %rax\n\t"                      \
+ "cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) 
"+steal_time-0b(%rax)\n\t" \
  "setne  %al\n\t"
 
 DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
-- 
2.47.3

