From: Ben Gardon <bgar...@google.com>

[ Upstream commit a9442f594147f95307f691cfba0c31e25dc79b9d ]

Move the work of adding and removing TDP MMU pages to/from "secondary"
data structures to helper functions. These functions will be built on in
future commits to enable MMU operations to proceed (mostly) in parallel.

No functional change expected.

Signed-off-by: Ben Gardon <bgar...@google.com>
Message-Id: <20210202185734.1680553-20-bgar...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 47 +++++++++++++++++++++++++++++++-------
 1 file changed, 39 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 136311be5890..14d69c01c710 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -262,6 +262,39 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
        }
 }
 
+/**
+ * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
+ *
+ * @kvm: kvm instance
+ * @sp: the new page
+ * @account_nx: This page replaces a NX large page and should be marked for
+ *             eventual reclaim.
+ */
+static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                             bool account_nx)
+{
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
+       if (account_nx)
+               account_huge_nx_page(kvm, sp);
+}
+
+/**
+ * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
+ *
+ * @kvm: kvm instance
+ * @sp: the page to be removed
+ */
+static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       list_del(&sp->link);
+       if (sp->lpage_disallowed)
+               unaccount_huge_nx_page(kvm, sp);
+}
+
 /**
  * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
  *
@@ -281,10 +314,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt)
 
        trace_kvm_mmu_prepare_zap_page(sp);
 
-       list_del(&sp->link);
-
-       if (sp->lpage_disallowed)
-               unaccount_huge_nx_page(kvm, sp);
+       tdp_mmu_unlink_page(kvm, sp);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                old_child_spte = READ_ONCE(*(pt + i));
@@ -704,15 +734,16 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
                if (!is_shadow_present_pte(iter.old_spte)) {
                        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
-                       list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
                        child_pt = sp->spt;
+
+                       tdp_mmu_link_page(vcpu->kvm, sp,
+                                         huge_page_disallowed &&
+                                         req_level >= iter.level);
+
                        new_spte = make_nonleaf_spte(child_pt,
                                                     !shadow_accessed_mask);
 
                        trace_kvm_mmu_get_page(sp, true);
-                       if (huge_page_disallowed && req_level >= iter.level)
-                               account_huge_nx_page(vcpu->kvm, sp);
-
                        tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
                }
        }
-- 
2.30.1
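
The net effect of the diff above is that both existing call sites now
funnel through a single pair of helpers, so the accounting invariant
lives in one place: a page linked with account_nx=true has
account_huge_nx_page() set sp->lpage_disallowed, which is exactly the
flag tdp_mmu_unlink_page() tests on removal. A minimal sketch of the
resulting pairing, condensed from the hunks above (illustrative only,
not a standalone compilable unit):

        /* Map path: link a newly allocated non-leaf page, accounting
         * it when it replaces a disallowed huge mapping. */
        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
        tdp_mmu_link_page(vcpu->kvm, sp,
                          huge_page_disallowed && req_level >= iter.level);

        /* Zap path: unlinking undoes the NX accounting performed at
         * link time via the sp->lpage_disallowed check. */
        tdp_mmu_unlink_page(kvm, sp);

Both helpers assert that mmu_lock is held for write, which documents
the current locking requirement and gives the follow-up commits a
single choke point to adjust when these paths start running (mostly)
in parallel.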


