From: David Matlack <dmatl...@google.com>

Update the page stats in __rmap_add() rather than at the call site. This
will avoid having to manually update page stats when splitting huge
pages in a subsequent commit.

No functional change intended.

Reviewed-by: Ben Gardon <bgar...@google.com>
Reviewed-by: Peter Xu <pet...@redhat.com>
Signed-off-by: David Matlack <dmatl...@google.com>
Message-Id: <20220516232138.1783324-17-dmatl...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8cdbe2958d9..7cca28d89a85 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1562,6 +1562,8 @@ static void __rmap_add(struct kvm *kvm,
 
        sp = sptep_to_sp(spte);
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+       kvm_update_page_stats(kvm, sp->role.level, 1);
+
        rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
        rmap_count = pte_list_add(cache, spte, rmap_head);
 
@@ -2783,7 +2785,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 
        if (!was_rmapped) {
                WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
-               kvm_update_page_stats(vcpu->kvm, level, 1);
                rmap_add(vcpu, slot, sptep, gfn);
        }
 
-- 
2.31.1

