From 54dc26e44f1c0aa460bef409b799f36dae56a911 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Wed, 18 Jun 2008 11:23:13 +0800
Subject: [PATCH] KVM: VMX: Add ept_sync_context in flush_tlb

Fix a potential issue caused by kvm_mmu_slot_remove_write_access().
The old behavior doesn't sync the EPT TLB with the modified EPT entry,
which results in inconsistent content of the EPT TLB and the EPT table.

Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
 arch/x86/kvm/vmx.c |   18 ++++++++++++------
 1 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..5e2a800 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -83,6 +83,7 @@ struct vcpu_vmx {
                } irq;
        } rmode;
        int vpid;
+       u64 eptp;
 };

 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -364,24 +365,24 @@ static inline void ept_sync_global(void)
                __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
 }

-static inline void ept_sync_context(u64 eptp)
+static inline void ept_sync_context(struct vcpu_vmx *vmx)
 {
        if (vm_need_ept()) {
                if (cpu_has_vmx_invept_context())
-                       __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+                       __invept(VMX_EPT_EXTENT_CONTEXT, vmx->eptp, 0);
                else
                        ept_sync_global();
        }
 }

-static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
+static inline void ept_sync_individual_addr(struct vcpu_vmx *vmx, gpa_t gpa)
 {
        if (vm_need_ept()) {
                if (cpu_has_vmx_invept_individual_addr())
                        __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
-                                       eptp, gpa);
+                                       vmx->eptp, gpa);
                else
-                       ept_sync_context(eptp);
+                       ept_sync_context(vmx);
        }
 }

@@ -1407,6 +1408,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
        vpid_sync_vcpu_all(to_vmx(vcpu));
+       if (vm_need_ept())
+               ept_sync_context(to_vmx(vcpu));
 }

 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -1517,12 +1520,15 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        unsigned long guest_cr3;
        u64 eptp;
+       struct vcpu_vmx *vmx;

+       vmx = to_vmx(vcpu);
        guest_cr3 = cr3;
        if (vm_need_ept()) {
                eptp = construct_eptp(cr3);
                vmcs_write64(EPT_POINTER, eptp);
-               ept_sync_context(eptp);
+               vmx->eptp = eptp;
+               ept_sync_context(vmx);
                ept_load_pdptrs(vcpu);
                guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
                        VMX_EPT_IDENTITY_PAGETABLE_ADDR;
--
1.5.5

From 54dc26e44f1c0aa460bef409b799f36dae56a911 Mon Sep 17 00:00:00 2001
From: Sheng Yang <[EMAIL PROTECTED]>
Date: Wed, 18 Jun 2008 11:23:13 +0800
Subject: [PATCH] KVM: VMX: Add ept_sync_context in flush_tlb

Fix a potential issue caused by kvm_mmu_slot_remove_write_access(). The
old behavior doesn't sync the EPT TLB with the modified EPT entry, which
results in inconsistent content of the EPT TLB and the EPT table.

Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
 arch/x86/kvm/vmx.c |   18 ++++++++++++------
 1 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e4278d..5e2a800 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -83,6 +83,7 @@ struct vcpu_vmx {
 		} irq;
 	} rmode;
 	int vpid;
+	u64 eptp;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -364,24 +365,24 @@ static inline void ept_sync_global(void)
 		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
 }
 
-static inline void ept_sync_context(u64 eptp)
+static inline void ept_sync_context(struct vcpu_vmx *vmx)
 {
 	if (vm_need_ept()) {
 		if (cpu_has_vmx_invept_context())
-			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+			__invept(VMX_EPT_EXTENT_CONTEXT, vmx->eptp, 0);
 		else
 			ept_sync_global();
 	}
 }
 
-static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
+static inline void ept_sync_individual_addr(struct vcpu_vmx *vmx, gpa_t gpa)
 {
 	if (vm_need_ept()) {
 		if (cpu_has_vmx_invept_individual_addr())
 			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
-					eptp, gpa);
+					vmx->eptp, gpa);
 		else
-			ept_sync_context(eptp);
+			ept_sync_context(vmx);
 	}
 }
 
@@ -1407,6 +1408,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	vpid_sync_vcpu_all(to_vmx(vcpu));
+	if (vm_need_ept())
+		ept_sync_context(to_vmx(vcpu));
 }
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -1517,12 +1520,15 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	unsigned long guest_cr3;
 	u64 eptp;
+	struct vcpu_vmx *vmx;
 
+	vmx = to_vmx(vcpu);
 	guest_cr3 = cr3;
 	if (vm_need_ept()) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		ept_sync_context(eptp);
+		vmx->eptp = eptp;
+		ept_sync_context(vmx);
 		ept_load_pdptrs(vcpu);
 		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
 			VMX_EPT_IDENTITY_PAGETABLE_ADDR;
-- 
1.5.5

Reply via email to