On Mon, Oct 29, 2018 at 04:40:42PM +0100, Julian Stecklina wrote:
> The VM entry/exit path is a giant inline assembly statement. Simplify it
> by doing CR2 context switching in plain C. Move CR2 restore behind IBRS
> clearing, so we reduce the amount of code we execute with IBRS on.

I think it's worth documenting two things in the changelog:

  - Using {read,write}_cr2() means KVM will use pv_mmu_ops instead of
    open coding native_{read,write}_cr2() (rough sketch below).

  - The CR2 code has been done in assembly since KVM's genesis[1],
    which predates the addition of the paravirt ops[2], i.e. KVM isn't
    deliberately avoiding the paravirt ops.
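
For reference, the indirection looks roughly like this (paraphrasing
arch/x86/include/asm/paravirt.h and asm/special_insns.h from memory, so
treat the exact definitions as illustrative rather than authoritative):

        /* With CONFIG_PARAVIRT, read_cr2() dispatches through pv_mmu_ops */
        static inline unsigned long read_cr2(void)
        {
                return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
        }

        /* The native path is just the raw mov from %cr2, i.e. what the
         * old inline asm blob was doing by hand.
         */
        static inline unsigned long native_read_cr2(void)
        {
                unsigned long val;
                asm volatile("mov %%cr2, %0" : "=r" (val));
                return val;
        }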

The above info makes it trivially easy to review this patch from a
correctness standpoint.  With that:

Reviewed-by: Sean Christopherson <sean.j.christopher...@intel.com>


[1] Commit 6aa8b732ca01 ("[PATCH] kvm: userspace interface")
[2] Commit d3561b7fa0fb ("[PATCH] paravirt: header and stubs for paravirtualisation")
 
> Signed-off-by: Julian Stecklina <jstec...@amazon.de>
> Reviewed-by: Jan H. Schönherr <jscho...@amazon.de>
> Reviewed-by: Konrad Jan Miller <k...@amazon.de>
> Reviewed-by: Jim Mattson <jmatt...@google.com>
> ---
>  arch/x86/kvm/vmx.c | 15 +++++----------
>  1 file changed, 5 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index ccc6a01..a6e5a5c 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -11212,6 +11212,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>       evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
>               (unsigned long)&current_evmcs->host_rsp : 0;
>  
> +     if (read_cr2() != vcpu->arch.cr2)
> +             write_cr2(vcpu->arch.cr2);
> +
>       if (static_branch_unlikely(&vmx_l1d_should_flush))
>               vmx_l1d_flush(vcpu);
>  
> @@ -11231,13 +11234,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>               "2: \n\t"
>               __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
>               "1: \n\t"
> -             /* Reload cr2 if changed */
> -             "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
> -             "mov %%cr2, %%" _ASM_DX " \n\t"
> -             "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
> -             "je 3f \n\t"
> -             "mov %%" _ASM_AX", %%cr2 \n\t"
> -             "3: \n\t"
>               /* Check if vmlaunch of vmresume is needed */
>               "cmpl $0, %c[launched](%0) \n\t"
>               /* Load guest registers.  Don't clobber flags. */
> @@ -11298,8 +11294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu 
> *vcpu)
>               "xor %%r14d, %%r14d \n\t"
>               "xor %%r15d, %%r15d \n\t"
>  #endif
> -             "mov %%cr2, %%" _ASM_AX "   \n\t"
> -             "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
>  
>               "xor %%eax, %%eax \n\t"
>               "xor %%ebx, %%ebx \n\t"
> @@ -11331,7 +11325,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>               [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
>               [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
>  #endif
> -             [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
>               [wordsize]"i"(sizeof(ulong))
>             : "cc", "memory"
>  #ifdef CONFIG_X86_64
> @@ -11365,6 +11358,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>       /* Eliminate branch target predictions from guest mode */
>       vmexit_fill_RSB();
>  
> +     vcpu->arch.cr2 = read_cr2();
> +
>       /* All fields are clean at this point */
>       if (static_branch_unlikely(&enable_evmcs))
>               current_evmcs->hv_clean_fields |=
> -- 
> 2.7.4
> 
