KVM: nVMX: enhance allocate/free_vpid to handle the shadow vpid (vpid02)

Suggested-by: Wincy Van <fanwenyi0...@gmail.com>
Signed-off-by: Wanpeng Li <wanpeng...@hotmail.com>
---
 arch/x86/kvm/vmx.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index da1590e..bd07d88 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -424,6 +424,8 @@ struct nested_vmx {
        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
 
+       u16 vpid02;
+
        u32 nested_vmx_procbased_ctls_low;
        u32 nested_vmx_procbased_ctls_high;
        u32 nested_vmx_true_procbased_ctls_low;
@@ -4155,18 +4157,24 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        return r;
 }
 
-static void allocate_vpid(struct vcpu_vmx *vmx)
+static void allocate_vpid(struct vcpu_vmx *vmx, bool nested_vpid)
 {
        int vpid;
 
-       vmx->vpid = 0;
+       if (nested_vpid)
+               vmx->nested.vpid02 = 0;
+       else
+               vmx->vpid = 0;
        if (!enable_vpid)
                return;
        spin_lock(&vmx_vpid_lock);
        vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
        if (vpid < VMX_NR_VPIDS) {
-               vmx->vpid = vpid;
+               if (nested_vpid)
+                       vmx->nested.vpid02 = vpid;
+               else
+                       vmx->vpid = vpid;
                __set_bit(vpid, vmx_vpid_bitmap);
        }
        spin_unlock(&vmx_vpid_lock);
 }
@@ -4178,6 +4191,8 @@ static void free_vpid(struct vcpu_vmx *vmx)
        spin_lock(&vmx_vpid_lock);
        if (vmx->vpid != 0)
                __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+       if (vmx->nested.vpid02)
+               __clear_bit(vmx->nested.vpid02, vmx_vpid_bitmap);
        spin_unlock(&vmx_vpid_lock);
 }
 
@@ -8509,7 +8528,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, 
unsigned int id)
        if (!vmx)
                return ERR_PTR(-ENOMEM);
 
-       allocate_vpid(vmx);
+       allocate_vpid(vmx, false);
 
        err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
        if (err)
@@ -8557,8 +8576,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, 
unsigned int id)
                        goto free_vmcs;
        }
 
-       if (nested)
+       if (nested) {
                nested_vmx_setup_ctls_msrs(vmx);
+               allocate_vpid(vmx, true);
+       }
 
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to