From: Yulei Zhang <yuleixzh...@tencent.com>

Release the pre-pinned memory used by the direct-build EPT when the
guest VM exits. The per-vCPU unload/destroy paths only drop their
reference to the shared global root; the pre-pinned page-table pages
themselves are freed once per VM via
kvm_direct_tdp_release_global_root().

Signed-off-by: Yulei Zhang <yuleixzh...@tencent.com>
---
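Notes:

A minimal sketch of the assumed teardown ordering (illustrative only:
the helper below is hypothetical, and the real call site for
kvm_direct_tdp_release_global_root() is wired up elsewhere in this
series):

	/* Hypothetical helper; shows the assumed ordering only. */
	static void direct_tdp_teardown_sketch(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		/* Each vCPU only drops its reference to the shared root. */
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_mmu_destroy(vcpu);

		/* The pre-pinned global EPT pages are then freed exactly once. */
		kvm_direct_tdp_release_global_root(kvm);
	}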
 arch/x86/kvm/mmu/mmu.c | 39 ++++++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 539974183653..df703deac928 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4360,8 +4360,11 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
 {
-       __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
-                         skip_tlb_flush);
+       if (!vcpu->arch.direct_build_tdp)
+               __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+                                 skip_tlb_flush);
+       else
+               vcpu->arch.mmu->root_hpa = INVALID_PAGE;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
@@ -5204,10 +5207,16 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
-       kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
-       WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
-       kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
-       WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
+
+       if (!vcpu->arch.direct_build_tdp) {
+               kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
+               WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
+               kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+               WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
+               WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
+       }
+
+       vcpu->arch.direct_build_tdp = false;
+       vcpu->arch.mmu->root_hpa = INVALID_PAGE;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
@@ -6510,6 +6519,14 @@ void kvm_direct_tdp_remove_page_table(struct kvm *kvm, struct kvm_memory_slot *s
        kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_direct_tdp_release_global_root(struct kvm *kvm)
+{
+       if (kvm->arch.global_root_hpa)
+               __kvm_walk_global_page(kvm, kvm->arch.global_root_hpa, kvm_x86_ops.get_tdp_level(NULL));
+
+       return;
+}
+
 /*
  * Calculate mmu pages needed for kvm.
  */
@@ -6536,9 +6553,13 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-       kvm_mmu_unload(vcpu);
-       free_mmu_pages(&vcpu->arch.root_mmu);
-       free_mmu_pages(&vcpu->arch.guest_mmu);
+       if (vcpu->arch.direct_build_tdp) {
+               vcpu->arch.mmu->root_hpa = INVALID_PAGE;
+       } else {
+               kvm_mmu_unload(vcpu);
+               free_mmu_pages(&vcpu->arch.root_mmu);
+               free_mmu_pages(&vcpu->arch.guest_mmu);
+       }
        mmu_free_memory_caches(vcpu);
 }
 
-- 
2.17.1
