Use macros for bitness-insensitive register names, instead of
rolling our own.

Signed-off-by: Avi Kivity <a...@redhat.com>
---
 arch/x86/kvm/svm.c | 46 ++++++++++++++++++++--------------------------
 1 file changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 611c728..818fceb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3782,12 +3782,6 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
        svm_complete_interrupts(svm);
 }
 
-#ifdef CONFIG_X86_64
-#define R "r"
-#else
-#define R "e"
-#endif
-
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3814,13 +3808,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        local_irq_enable();
 
        asm volatile (
-               "push %%"R"bp; \n\t"
-               "mov %c[rbx](%[svm]), %%"R"bx \n\t"
-               "mov %c[rcx](%[svm]), %%"R"cx \n\t"
-               "mov %c[rdx](%[svm]), %%"R"dx \n\t"
-               "mov %c[rsi](%[svm]), %%"R"si \n\t"
-               "mov %c[rdi](%[svm]), %%"R"di \n\t"
-               "mov %c[rbp](%[svm]), %%"R"bp \n\t"
+               "push %%" _ASM_BP "; \n\t"
+               "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
+               "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
+               "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
+               "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
+               "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
+               "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
 #ifdef CONFIG_X86_64
                "mov %c[r8](%[svm]),  %%r8  \n\t"
                "mov %c[r9](%[svm]),  %%r9  \n\t"
@@ -3833,20 +3827,20 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 
                /* Enter guest mode */
-               "push %%"R"ax \n\t"
-               "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
+               "push %%" _ASM_AX " \n\t"
+               "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
                __ex(SVM_VMLOAD) "\n\t"
                __ex(SVM_VMRUN) "\n\t"
                __ex(SVM_VMSAVE) "\n\t"
-               "pop %%"R"ax \n\t"
+               "pop %%" _ASM_AX " \n\t"
 
                /* Save guest registers, load host registers */
-               "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
-               "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
-               "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
-               "mov %%"R"si, %c[rsi](%[svm]) \n\t"
-               "mov %%"R"di, %c[rdi](%[svm]) \n\t"
-               "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
+               "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
+               "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
+               "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
+               "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
+               "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
+               "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
 #ifdef CONFIG_X86_64
                "mov %%r8,  %c[r8](%[svm]) \n\t"
                "mov %%r9,  %c[r9](%[svm]) \n\t"
@@ -3857,7 +3851,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"
 #endif
-               "pop %%"R"bp"
+               "pop %%" _ASM_BP
                :
                : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
@@ -3878,9 +3872,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
                  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
 #endif
                : "cc", "memory"
-               , R"bx", R"cx", R"dx", R"si", R"di"
 #ifdef CONFIG_X86_64
+               , "rbx", "rcx", "rdx", "rsi", "rdi"
                , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
+#else
+               , "ebx", "ecx", "edx", "esi", "edi"
 #endif
                );
 
@@ -3940,8 +3936,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        mark_all_clean(svm->vmcb);
 }
 
-#undef R
-
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-- 
1.7.12

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to