Currently, kvm_mmu_zap_page() returns only the number of freed child shadow
pages, not counting the zapped page itself. This can confuse callers, since
the return value does not tell them how many pages were actually freed and
they have to adjust the count themselves. Make kvm_mmu_zap_page() return the
number of pages it actually freed, counting the page itself when it is freed.
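
To illustrate the accounting change from the callers' point of view, here is
a minimal standalone sketch. This is toy code, not kernel code; the names
toy_sp, zap_returns_children and zap_returns_total are made up for this
example and only model the old and new return-value conventions:

#include <stdio.h>

struct toy_sp {
	int children;    /* child shadow pages freed along with this one */
	int root_count;  /* nonzero: the page itself cannot be freed yet */
};

/* Old behaviour: only the freed children are counted. */
static int zap_returns_children(const struct toy_sp *sp)
{
	return sp->children;
}

/* New behaviour: the page itself is counted too when it is freed. */
static int zap_returns_total(const struct toy_sp *sp)
{
	int ret = sp->children;

	if (!sp->root_count)
		ret++;		/* count self, as the patch does */
	return ret;
}

int main(void)
{
	struct toy_sp sp = { .children = 3, .root_count = 0 };
	int used_pages = 10;

	/* Old convention: the caller had to subtract one more for the page. */
	int old_used = used_pages - zap_returns_children(&sp) - 1;

	/* New convention: the return value already is the full count. */
	int new_used = used_pages - zap_returns_total(&sp);

	printf("old accounting: %d, new accounting: %d\n", old_used, new_used);
	return 0;
}

With the new convention, callers such as kvm_mmu_change_mmu_pages() can
simply subtract the return value, which is what the hunks below do.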

Signed-off-by: Gui Jianfeng <guijianf...@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |    5 +++--
 1 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51eb6d6..8ab6820 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1503,6 +1503,8 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (sp->unsync)
                kvm_unlink_unsync_page(kvm, sp);
        if (!sp->root_count) {
+               /* Count self */
+               ret++;
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
@@ -1539,7 +1541,6 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        used_pages -= kvm_mmu_zap_page(kvm, page);
-                       used_pages--;
                }
                kvm_nr_mmu_pages = used_pages;
                kvm->arch.n_free_mmu_pages = 0;
@@ -2908,7 +2909,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
-       return kvm_mmu_zap_page(kvm, page) + 1;
+       return kvm_mmu_zap_page(kvm, page);
 }
 
 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
-- 
1.6.5.2

