Do not release the pfn in mmu_set_spte(); let its callers release it instead. This helps us clean up pfn releasing in later patches.
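
Roughly, the caller-side convention after this patch looks like the sketch below (illustrative only, with most mmu_set_spte() arguments elided; it is not the exact call sites):

	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	...
	mmu_set_spte(vcpu, sptep, ..., gfn, pfn, ...);

	/* the caller, not mmu_set_spte(), now drops the reference */
	if (!is_error_pfn(pfn))
		kvm_release_pfn_clean(pfn);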

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |   29 ++++++++++++++---------------
 arch/x86/kvm/paging_tmpl.h |   18 +++++++++++-------
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f74c63a..29ce28b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,9 +2496,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                                rmap_recycle(vcpu, sptep, gfn);
                }
        }
-
-       if (!is_error_pfn(pfn))
-               kvm_release_pfn_clean(pfn);
 }

 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2535,12 +2532,15 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
        if (ret <= 0)
                return -1;

-       for (i = 0; i < ret; i++, gfn++, start++)
+       for (i = 0; i < ret; i++, gfn++, start++) {
                mmu_set_spte(vcpu, start, ACC_ALL,
                             access, 0, 0, NULL,
                             sp->role.level, gfn,
                             page_to_pfn(pages[i]), true, true);

+               kvm_release_page_clean(pages[i]);
+       }
+
        return 0;
 }

@@ -2863,23 +2863,22 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_notifier_retry(vcpu, mmu_seq))
+       if (mmu_notifier_retry(vcpu, mmu_seq)) {
+               r = 0;
                goto out_unlock;
+       }
+
        kvm_mmu_free_some_pages(vcpu);
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
                         prefault);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-
-
-       return r;

 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(pfn))
                kvm_release_pfn_clean(pfn);
-       return 0;
+       return r;
 }


@@ -3333,22 +3332,22 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_notifier_retry(vcpu, mmu_seq))
+       if (mmu_notifier_retry(vcpu, mmu_seq)) {
+               r = 0;
                goto out_unlock;
+       }
+
        kvm_mmu_free_some_pages(vcpu);
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, gpa, write, map_writable,
                         level, gfn, pfn, prefault);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-
-       return r;

 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(pfn))
                kvm_release_pfn_clean(pfn);
-       return 0;
+       return r;
 }

 static void nonpaging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c004ab6..92f466c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -380,6 +380,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                     NULL, PT_PAGE_TABLE_LEVEL,
                     gpte_to_gfn(gpte), pfn, true, true);
+
+       if (!is_error_pfn(pfn))
+               kvm_release_pfn_clean(pfn);
 }

 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
@@ -452,6 +455,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                             NULL, PT_PAGE_TABLE_LEVEL, gfn,
                             pfn, true, true);
+               if (!is_error_pfn(pfn))
+                       kvm_release_pfn_clean(pfn);
        }
 }

@@ -544,8 +549,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 out_gpte_changed:
        if (sp)
                kvm_mmu_put_page(sp, it.sptep);
-       if (!is_error_pfn(pfn))
-               kvm_release_pfn_clean(pfn);
+
        return NULL;
 }

@@ -625,8 +629,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_notifier_retry(vcpu, mmu_seq))
+       if (mmu_notifier_retry(vcpu, mmu_seq)) {
+               r = 0;
                goto out_unlock;
+       }

        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
@@ -640,15 +646,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-
-       return emulate;
+       r = emulate;

 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(pfn))
                kvm_release_pfn_clean(pfn);
-       return 0;
+       return r;
 }

 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
-- 
1.7.7.6
