Currently, in kvm_mmu_change_mmu_pages(kvm, page), "used_pages--" is
performed after calling kvm_mmu_zap_page() regardless of whether "page"
was actually reclaimed, but a root sp is not reclaimed by
kvm_mmu_zap_page(). So it makes more sense for kvm_mmu_zap_page() to
return the total number of reclaimed sps, including the page itself. A
new parameter is added to kvm_mmu_zap_page() to indicate whether the
top page itself was reclaimed.
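
As an illustration only (not part of the patch), the following user-space
C sketch models the new calling convention: the return value counts every
reclaimed sp including the page itself, and self_deleted reports whether
the page itself was freed, so a hash-bucket walk only needs to restart
when some page other than the one being zapped was reclaimed.

	/*
	 * Illustrative user-space sketch (not kernel code) of the new
	 * kvm_mmu_zap_page() return convention:
	 *   - the return value counts every shadow page actually reclaimed,
	 *     including the page passed in;
	 *   - *self_deleted (if non-NULL) is set to 1 only when the page
	 *     passed in was itself freed (root pages are only invalidated).
	 */
	#include <stdio.h>

	/*
	 * Decide whether a hash-bucket walk must restart, given the value
	 * returned by kvm_mmu_zap_page() and the self_deleted flag.
	 */
	static int needs_restart(int ret, int self_deleted)
	{
		/*
		 * Restart only if pages other than the one we were holding
		 * may have been unlinked from the list/bucket.
		 */
		return ret > 1 || (ret == 1 && !self_deleted);
	}

	int main(void)
	{
		/* Only the page itself was freed: the walk can continue. */
		printf("%d\n", needs_restart(1, 1));	/* 0 */
		/* The page was a root (only invalidated), a child was freed: restart. */
		printf("%d\n", needs_restart(1, 0));	/* 1 */
		/* Several pages freed: restart. */
		printf("%d\n", needs_restart(3, 1));	/* 1 */
		return 0;
	}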

Signed-off-by: Gui Jianfeng <guijianf...@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   53 +++++++++++++++++++++++++++++++++++----------------
 1 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a17db1..d0960f1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1195,12 +1195,13 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        --kvm->stat.mmu_unsync;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                           int *self_deleted);
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
-               kvm_mmu_zap_page(vcpu->kvm, sp);
+               kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
                return 1;
        }
 
@@ -1209,7 +1210,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                kvm_flush_remote_tlbs(vcpu->kvm);
        kvm_unlink_unsync_page(vcpu->kvm, sp);
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
-               kvm_mmu_zap_page(vcpu->kvm, sp);
+               kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
                return 1;
        }
 
@@ -1480,7 +1481,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                struct kvm_mmu_page *sp;
 
                for_each_sp(pages, sp, parents, i) {
-                       kvm_mmu_zap_page(kvm, sp);
+                       kvm_mmu_zap_page(kvm, sp, NULL);
                        mmu_pages_clear_parents(&parents);
                        zapped++;
                }
@@ -1490,7 +1491,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
        return zapped;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                           int *self_deleted)
 {
        int ret;
 
@@ -1507,11 +1509,16 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (!sp->root_count) {
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
+               /* Count self */
+               ret++;
+               if (self_deleted)
+                       *self_deleted = 1;
        } else {
                sp->role.invalid = 1;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
        }
+
        kvm_mmu_reset_last_pte_updated(kvm);
        return ret;
 }
@@ -1540,8 +1547,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       used_pages -= kvm_mmu_zap_page(kvm, page);
-                       used_pages--;
+                       used_pages -= kvm_mmu_zap_page(kvm, page, NULL);
                }
                kvm_nr_mmu_pages = used_pages;
                kvm->arch.n_free_mmu_pages = 0;
@@ -1560,6 +1566,8 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *n;
        int r;
+       int self_deleted = 0;
+       int ret;
 
        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        r = 0;
@@ -1571,7 +1579,8 @@ restart:
                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
                        r = 1;
-                       if (kvm_mmu_zap_page(kvm, sp))
+                       ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+                       if (ret > 1 || (ret == 1 && self_deleted == 0))
                                goto restart;
                }
        return r;
@@ -1583,6 +1592,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *nn;
+       int ret;
+       int self_deleted = 0;
 
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
@@ -1592,7 +1603,8 @@ restart:
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
-                       if (kvm_mmu_zap_page(kvm, sp))
+                       ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+                       if (ret > 1 || (ret == 1 && self_deleted == 0))
                                goto restart;
                }
        }
@@ -2028,7 +2040,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                sp = page_header(root);
                --sp->root_count;
                if (!sp->root_count && sp->role.invalid)
-                       kvm_mmu_zap_page(vcpu->kvm, sp);
+                       kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;
@@ -2041,7 +2053,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                        sp = page_header(root);
                        --sp->root_count;
                        if (!sp->root_count && sp->role.invalid)
-                               kvm_mmu_zap_page(vcpu->kvm, sp);
+                               kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
                }
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
@@ -2616,6 +2628,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
        int r;
        int invlpg_counter;
+       int ret;
+       int self_deleted = 0;
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -2694,7 +2708,9 @@ restart:
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
-                       if (kvm_mmu_zap_page(vcpu->kvm, sp))
+
+                       ret = kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
+                       if (ret > 1 || (ret == 1 && self_deleted == 0))
                                goto restart;
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
@@ -2762,7 +2778,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu->kvm, sp);
+               kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
                ++vcpu->kvm->stat.mmu_recycled;
        }
 }
@@ -2902,13 +2918,16 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
+       int ret;
+       int self_deleted = 0;
 
        spin_lock(&kvm->mmu_lock);
 restart:
-       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-               if (kvm_mmu_zap_page(kvm, sp))
+       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+               ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+               if (ret > 1 || (ret == 1 && self_deleted == 0))
                        goto restart;
-
+       }
        spin_unlock(&kvm->mmu_lock);
 
        kvm_flush_remote_tlbs(kvm);
@@ -2920,7 +2939,7 @@ static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 
        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
-       kvm_mmu_zap_page(kvm, page);
+       kvm_mmu_zap_page(kvm, page, NULL);
 }
 
 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
-- 
1.6.5.2


