From: David Matlack <dmatl...@google.com>

Consolidate kvm_mmu_alloc_page() and kvm_mmu_alloc_shadow_page() under
the latter so that all shadow page allocation and initialization
happen in one place.

No functional change intended.

Signed-off-by: David Matlack <dmatl...@google.com>
Message-Id: <20220516232138.1783324-8-dmatl...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a59fe860da29..8b84cdd8c6cd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1664,27 +1664,6 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
        mmu_spte_clear_no_track(parent_pte);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, bool direct)
-{
-       struct kvm_mmu_page *sp;
-
-       sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-       sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
-       if (!direct)
-               sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
-       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
-       /*
-        * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
-        * depends on valid pages being added to the head of the list.  See
-        * comments in kvm_zap_obsolete_pages().
-        */
-       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
-       list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-       kvm_mod_used_mmu_pages(vcpu->kvm, +1);
-       return sp;
-}
-
 static void mark_unsync(u64 *spte);
 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
@@ -2072,7 +2051,23 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
                                                      struct hlist_head *sp_list,
                                                      union kvm_mmu_page_role role)
 {
-       struct kvm_mmu_page *sp = kvm_mmu_alloc_page(vcpu, role.direct);
+       struct kvm_mmu_page *sp;
+
+       sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+       sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+       if (!role.direct)
+               sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+
+       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+       /*
+        * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+        * depends on valid pages being added to the head of the list.  See
+        * comments in kvm_zap_obsolete_pages().
+        */
+       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
+       list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+       kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 
        sp->gfn = gfn;
        sp->role = role;
-- 
2.31.1
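
[Editor's note: for readers following the thread without the tree handy,
below is a sketch of how the consolidated kvm_mmu_alloc_shadow_page()
reads once this patch is applied. The gfn_t parameter and everything
after "sp->role = role;" fall outside the hunk quoted above, so those
lines are reconstructed from context and may differ in detail from the
actual tree.]

static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
						      gfn_t gfn,
						      struct hlist_head *sp_list,
						      union kvm_mmu_page_role role)
{
	struct kvm_mmu_page *sp;

	/* All shadow page allocation now happens here, in one place... */
	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!role.direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);

	/* ...and so does all initialization. */
	sp->gfn = gfn;
	sp->role = role;

	/* Reconstructed tail, not shown in the hunk above. */
	hlist_add_head(&sp->hash_link, sp_list);

	return sp;
}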

