Tianyu Lan <tianyu....@microsoft.com> writes:

> This patch checks the EPT table pointer of each vCPU when the EPT
> tables are set, and stores that pointer as the VM-wide identical EPT
> table pointer if all EPT table pointers of a single VM are the same.
> This is needed to support the para-virt EPT flush hypercall.
>
> Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
> ---
>  arch/x86/kvm/vmx.c | 31 +++++++++++++++++++++++++++++++
>  1 file changed, 31 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 1689f433f3a0..0b1e4e9fef2b 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -194,6 +194,9 @@ struct kvm_vmx {
>       unsigned int tss_addr;
>       bool ept_identity_pagetable_done;
>       gpa_t ept_identity_map_addr;
> +
> +     u64 identical_ept_pointer;
> +     spinlock_t ept_pointer_lock;
>  };
>
>  #define NR_AUTOLOAD_MSRS 8
> @@ -853,6 +856,7 @@ struct vcpu_vmx {
>        */
>       u64 msr_ia32_feature_control;
>       u64 msr_ia32_feature_control_valid_bits;
> +     u64 ept_pointer;
>  };
>
>  enum segment_cache_field {
> @@ -4958,6 +4962,29 @@ static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
>       return eptp;
>  }
>
> +static void check_ept_pointer(struct kvm_vcpu *vcpu, u64 eptp)
> +{
> +     struct kvm *kvm = vcpu->kvm;
> +     u64 tmp_eptp = INVALID_PAGE;
> +     int i;
> +
> +     spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
> +     to_vmx(vcpu)->ept_pointer = eptp;
> +
> +     kvm_for_each_vcpu(i, vcpu, kvm) {
> +             if (!VALID_PAGE(tmp_eptp)) {
> +                     tmp_eptp = to_vmx(vcpu)->ept_pointer;
> +             } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
> +                     to_kvm_vmx(kvm)->identical_ept_pointer = INVALID_PAGE;
> +                     spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
> +                     return;
> +             }
> +     }
> +
> +     to_kvm_vmx(kvm)->identical_ept_pointer = tmp_eptp;
> +     spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);

It seems we can get away with identical_ept_pointer being just a
'bool': go through the vCPU list and compare each vCPU's ept_pointer
with the current vCPU's ept_pointer. It would also make sense to
rename it to something like 'ept_pointers_match'.

I'm also not sure we need a dedicated ept_pointer_lock; can't we just
use the already existing mmu_lock from struct kvm?
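
Something like this (completely untested) is what I have in mind; the
check_ept_pointer_match() name is just for illustration:

static void check_ept_pointer_match(struct kvm_vcpu *vcpu, u64 eptp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp_vcpu;
	int i;

	spin_lock(&kvm->mmu_lock);
	to_vmx(vcpu)->ept_pointer = eptp;

	/* Assume the pointers match until some vCPU disagrees. */
	to_kvm_vmx(kvm)->ept_pointers_match = true;
	kvm_for_each_vcpu(i, tmp_vcpu, kvm) {
		if (to_vmx(tmp_vcpu)->ept_pointer != eptp) {
			to_kvm_vmx(kvm)->ept_pointers_match = false;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
}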

> +}
> +
>  static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>  {
>       unsigned long guest_cr3;
> @@ -4967,6 +4994,8 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>       if (enable_ept) {
>               eptp = construct_eptp(vcpu, cr3);
>               vmcs_write64(EPT_POINTER, eptp);
> +             check_ept_pointer(vcpu, eptp);

Do we always get here when we need to? E.g., do we need to enforce
CPU_BASED_CR3_STORE_EXITING?
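
FWIW, with EPT enabled the CR3 exiting controls are currently masked
off, if I'm reading vmx_exec_control() right:

	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;

so guest CR3 accesses don't cause vmexits and vmx_set_cr3() should
only be reached when KVM itself (re)loads the MMU root, if I
understand correctly.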

> +
>               if (enable_unrestricted_guest || is_paging(vcpu) ||
>                   is_guest_mode(vcpu))
>                       guest_cr3 = kvm_read_cr3(vcpu);
> @@ -10383,6 +10412,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>
>  static int vmx_vm_init(struct kvm *kvm)
>  {
> +     spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
> +
>       if (!ple_gap)
>               kvm->arch.pause_in_guest = true;
>       return 0;

-- 
  Vitaly
