Hi Sean:
        Thanks for your review.

On 7/3/2018 1:29 AM, Sean Christopherson wrote:
> On Mon, Jul 02, 2018 at 02:17:29PM +0000, Tianyu Lan wrote:
>> This patch is to check ept table pointer of each cpus when set ept
>> tables and store identical ept table pointer if all ept table pointers
>> of single VM are same. This is for support of para-virt ept flush
>> hypercall.
>>
>> Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
>> ---
>>   arch/x86/kvm/vmx.c | 31 +++++++++++++++++++++++++++++++
>>   1 file changed, 31 insertions(+)
>>
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 1689f433f3a0..0b1e4e9fef2b 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -194,6 +194,9 @@ struct kvm_vmx {
>>      unsigned int tss_addr;
>>      bool ept_identity_pagetable_done;
>>      gpa_t ept_identity_map_addr;
>> +
>> +    u64 identical_ept_pointer;
>> +    spinlock_t ept_pointer_lock;
>>   };
>>   
>>   #define NR_AUTOLOAD_MSRS 8
>> @@ -853,6 +856,7 @@ struct vcpu_vmx {
>>       */
>>      u64 msr_ia32_feature_control;
>>      u64 msr_ia32_feature_control_valid_bits;
>> +    u64 ept_pointer;
>>   };
>>   
>>   enum segment_cache_field {
>> @@ -4958,6 +4962,29 @@ static u64 construct_eptp(struct kvm_vcpu *vcpu, 
>> unsigned long root_hpa)
>>      return eptp;
>>   }
>>   
>> +static void check_ept_pointer(struct kvm_vcpu *vcpu, u64 eptp)
>> +{
>> +    struct kvm *kvm = vcpu->kvm;
>> +    u64 tmp_eptp = INVALID_PAGE;
>> +    int i;
>> +
>> +    spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
>> +    to_vmx(vcpu)->ept_pointer = eptp;
>> +
>> +    kvm_for_each_vcpu(i, vcpu, kvm) {
>> +            if (!VALID_PAGE(tmp_eptp)) {
>> +                    tmp_eptp = to_vmx(vcpu)->ept_pointer;
>> +            } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
>> +                    to_kvm_vmx(kvm)->identical_ept_pointer = INVALID_PAGE;
>> +                    spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
>> +                    return;
>> +            }
>> +    }
>> +
>> +    to_kvm_vmx(kvm)->identical_ept_pointer = tmp_eptp;
>> +    spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
>> +}
>> +
>>   static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>>   {
>>      unsigned long guest_cr3;
>> @@ -4967,6 +4994,8 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, 
>> unsigned long cr3)
>>      if (enable_ept) {
>>              eptp = construct_eptp(vcpu, cr3);
>>              vmcs_write64(EPT_POINTER, eptp);
>> +            check_ept_pointer(vcpu, eptp);
> 
> Shouldn't this call, or the function itself, be conditional on Hyper-V
> or remote flushing, e.g. by checking kvm_x86_ops->tlb_remote_flush?
> 

Yes, that is a good suggestion; I will update it in the next version.

>> +
>>              if (enable_unrestricted_guest || is_paging(vcpu) ||
>>                  is_guest_mode(vcpu))
>>                      guest_cr3 = kvm_read_cr3(vcpu);
>> @@ -10383,6 +10412,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm 
>> *kvm, unsigned int id)
>>   
>>   static int vmx_vm_init(struct kvm *kvm)
>>   {
>> +    spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
>> +
>>      if (!ple_gap)
>>              kvm->arch.pause_in_guest = true;
>>      return 0;
>> -- 
>> 2.14.3

Reply via email to