- calculate the number of zapped pages properly in mmu_zap_unsync_children()
- calculate the number of freed pages properly in kvm_mmu_change_mmu_pages()
- restart the hash-list walk in mmu_unshadow() if child pages were zapped
  (toy C sketches of both fixes follow after the "---" marker below)

Signed-off-by: Xiao Guangrong <xiaoguangr...@cn.fujitsu.com>
---
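A note for reviewers, as a toy model in plain C rather than the kernel
code: the accounting the first two hunks rely on is that
kvm_mmu_zap_page() returns how many *other* pages it zapped along the
way (its unsync children), so a caller shrinking the cache must
subtract that return value and then one more for the page itself:

    #include <stdio.h>

    /* stand-in for kvm_mmu_zap_page(): pretend every zap drags two
     * unsync children along, and report that count to the caller */
    static int zap_one(void)
    {
        return 2;
    }

    int main(void)
    {
        int used = 10, target = 3;

        while (used > target) {
            used -= zap_one();  /* the zapped children      */
            used--;             /* ... plus the page itself */
        }
        printf("used = %d\n", used);  /* prints: used = 1 */
        return 0;
    }
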
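Likewise for the mmu_unshadow() hunk, again only a sketch: a _safe-style
list walk caches the next pointer before visiting a node, but zapping
one page can free *other* pages on the same hash list and leave that
cached pointer dangling, so the walk must restart from the head whenever
the zap reports collateral frees:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int key;
        struct node *next;
    };

    static struct node *head;

    static void push(int key)
    {
        struct node *n = malloc(sizeof(*n));

        n->key = key;
        n->next = head;
        head = n;
    }

    static void unlink_node(struct node *victim)
    {
        struct node **pp;

        for (pp = &head; *pp; pp = &(*pp)->next)
            if (*pp == victim) {
                *pp = victim->next;
                free(victim);
                return;
            }
    }

    /* frees n; for even keys it also frees n's successor, standing in
     * for zapped child pages; returns the number of extra frees */
    static int zap(struct node *n)
    {
        int extra = 0;

        if (n->key % 2 == 0 && n->next) {
            unlink_node(n->next);
            extra = 1;
        }
        unlink_node(n);
        return extra;
    }

    int main(void)
    {
        struct node *n, *nn;
        int i;

        for (i = 0; i < 8; i++)
            push(i);

        for (n = head; n; n = nn) {
            nn = n->next;   /* cached, like an hlist _safe walk */
            if (zap(n))     /* other entries disappeared too?   */
                nn = head;  /* cached nn may dangle: restart    */
        }
        printf("list empty: %s\n", head ? "no" : "yes");
        return 0;
    }
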
 arch/x86/kvm/mmu.c |    7 ++++---
 1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a23ca75..8f4f781 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1483,8 +1483,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                for_each_sp(pages, sp, parents, i) {
                        kvm_mmu_zap_page(kvm, sp);
                        mmu_pages_clear_parents(&parents);
+                       zapped++;
                }
-               zapped += pages.nr;
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
 
@@ -1540,7 +1540,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       kvm_mmu_zap_page(kvm, page);
+                       used_pages -= kvm_mmu_zap_page(kvm, page);
                        used_pages--;
                }
                kvm->arch.n_free_mmu_pages = 0;
@@ -1589,7 +1589,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
-                       kvm_mmu_zap_page(kvm, sp);
+                       if (kvm_mmu_zap_page(kvm, sp))
+                               nn = bucket->first;
                }
        }
 }
-- 
1.6.1.2

