On Fri, Aug 02, 2013 at 04:06:00PM +0800, Xiao Guangrong wrote:
> On 08/01/2013 10:08 PM, Gleb Natapov wrote:
> 
> > +/* Emulate the INVEPT instruction */
> > +static int handle_invept(struct kvm_vcpu *vcpu)
> > +{
> > +   u32 vmx_instruction_info;
> > +   bool ok;
> > +   unsigned long type;
> > +   gva_t gva;
> > +   struct x86_exception e;
> > +   struct {
> > +           u64 eptp, gpa;
> > +   } operand;
> > +
> > +   if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
> > +       !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
> > +           kvm_queue_exception(vcpu, UD_VECTOR);
> > +           return 1;
> > +   }
> > +
> > +   if (!nested_vmx_check_permission(vcpu))
> > +           return 1;
> > +
> > +   if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
> > +           kvm_queue_exception(vcpu, UD_VECTOR);
> > +           return 1;
> > +   }
> > +
> > +	/*
> > +	 * According to the Intel VMX instruction reference, the memory
> > +	 * operand is read even if it isn't needed (e.g., for type==global)
> > +	 */
> > +   vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> > +   if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
> > +                   vmx_instruction_info, &gva))
> > +           return 1;
> > +   if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
> > +                           sizeof(operand), &e)) {
> > +           kvm_inject_page_fault(vcpu, &e);
> > +           return 1;
> > +   }
> > +
> > +   type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
> > +
> > +   switch (type) {
> > +   case VMX_EPT_EXTENT_GLOBAL:
> > +   case VMX_EPT_EXTENT_CONTEXT:
> > +           ok = !!(nested_vmx_ept_caps &
> > +                           (1UL << (type + VMX_EPT_EXTENT_SHIFT)));
> > +           break;
> > +   default:
> > +           ok = false;
> > +   }
> > +
> > +   if (ok) {
> > +           kvm_mmu_sync_roots(vcpu);
> > +           kvm_mmu_flush_tlb(vcpu);
> > +           nested_vmx_succeed(vcpu);
> > +	} else
> > +           nested_vmx_failValid(vcpu,
> > +                           VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
> > +
> > +   skip_emulated_instruction(vcpu);
> > +   return 1;
> 
> Can this code be changed to:
> 
>       switch (type) {
>       case VMX_EPT_EXTENT_GLOBAL:
>       case VMX_EPT_EXTENT_CONTEXT:
>               if (nested_vmx_ept_caps &
> 			(1UL << (type + VMX_EPT_EXTENT_SHIFT))) {
>                       kvm_mmu_sync_roots(vcpu);
>                       kvm_mmu_flush_tlb(vcpu);
>                       nested_vmx_succeed(vcpu);
>                       break;
>               }
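> 		/* fall through: requested extent type not supported */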
>       default:
>               nested_vmx_failValid(vcpu,
>                               VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
>       }
> ?
> That's clearer, I think.
> 
> Also, we need to sync the shadow page tables only if the current eptp is
> the eptp being invalidated, that is:
> 
> if (type == VMX_EPT_EXTENT_GLOBAL ||
>     operand.eptp == nested_ept_get_cr3(vcpu)) {
>       kvm_mmu_sync_roots(vcpu);
>       ......
> }
Good point. Will do.
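
Something along these lines, I think (untested sketch combining both of
your suggestions; nested_ept_get_cr3() as in the nested EPT patches):

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
	case VMX_EPT_EXTENT_CONTEXT:
		if (!(nested_vmx_ept_caps &
				(1UL << (type + VMX_EPT_EXTENT_SHIFT)))) {
			nested_vmx_failValid(vcpu,
					VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			break;
		}
		/*
		 * Only sync and flush when the invalidation covers the
		 * eptp we are currently shadowing; a context invalidation
		 * of another eptp cannot affect our shadow page tables.
		 */
		if (type == VMX_EPT_EXTENT_GLOBAL ||
		    operand.eptp == nested_ept_get_cr3(vcpu)) {
			kvm_mmu_sync_roots(vcpu);
			kvm_mmu_flush_tlb(vcpu);
		}
		nested_vmx_succeed(vcpu);
		break;
	default:
		nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
	}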

--
                        Gleb.