There is no need to increase the largepage shadow count every time a page
is write-protected for resync: the count is only decremented when the
shadow page is destroyed, never on unsync, so accounting on each sync
leaks the count. Move account_shadowed() out of rmap_write_protect() and
into kvm_mmu_get_page(), so a shadowed gfn is accounted exactly once, at
shadow page creation.
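
For reference, here is a minimal userspace model of the invariant this
relies on (account_shadowed/unaccount_shadowed and write_count are the
real mmu.c names; everything else is a simplified stand-in, not kernel
code): the count must be incremented once at creation and decremented
once at destruction, so an extra increment on every resync, with no
matching decrement on unsync, leaves it permanently elevated.

/*
 * Standalone sketch of the largepage shadow accounting. "write_count"
 * stands in for the per-slot largepage write_count that
 * account_shadowed() manipulates in mmu.c.
 */
#include <assert.h>
#include <stdio.h>

static int write_count;	/* shadow count for one largepage frame */

/* Called once, at shadow page creation (kvm_mmu_get_page). */
static void account_shadowed(void)
{
	write_count++;
}

/* Called once, at shadow page destruction. */
static void unaccount_shadowed(void)
{
	write_count--;
	assert(write_count >= 0);
}

/*
 * Resync write-protects the page again but must not touch the count:
 * there is no decrement on unsync, so an increment here would leak.
 */
static void sync_page(void)
{
	/* rmap_write_protect() only; no account_shadowed() anymore */
}

int main(void)
{
	account_shadowed();	/* create shadow page */
	sync_page();		/* unsync + resync, any number of times */
	sync_page();
	unaccount_shadowed();	/* destroy shadow page */

	/* With the old placement, two extra increments would remain. */
	assert(write_count == 0);
	printf("write_count balanced: %d\n", write_count);
	return 0;
}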

Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>

Index: kvm/arch/x86/kvm/mmu.c
===================================================================
--- kvm.orig/arch/x86/kvm/mmu.c
+++ kvm/arch/x86/kvm/mmu.c
@@ -661,8 +661,6 @@ static void rmap_write_protect(struct kv
 
        if (write_protected)
                kvm_flush_remote_tlbs(kvm);
-
-       account_shadowed(kvm, gfn);
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
@@ -1130,8 +1128,10 @@ static struct kvm_mmu_page *kvm_mmu_get_
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
-       if (!metaphysical)
+       if (!metaphysical) {
                rmap_write_protect(vcpu->kvm, gfn);
+               account_shadowed(vcpu->kvm, gfn);
+       }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else