Every time kvm_mmu_get_page() is called with a non-NULL parent_pte
argument, a link_shadow_page() call follows it to set the parent entry,
so that the new mapping points to the returned page table.

Moving the parent_pte handling into link_shadow_page() allows us to
clean up the code, because parent_pte is passed to kvm_mmu_get_page()
only for mark_unsync() and mmu_page_add_parent_pte().

In addition, the patch avoids calling mark_unsync() for parents in the
sp->parent_ptes chain other than the newly added parent_pte, because
those entries have been there since before the current page fault
handling started.
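
For reference, the resulting link_shadow_page() looks like this (a
sketch assembled from the hunks below; the spte construction at the top
is the existing code, unchanged by this patch):

	static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
				     struct kvm_mmu_page *sp)
	{
		u64 spte;

		/* Build the spte for the new page table (unchanged). */
		spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
		       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;

		mmu_spte_set(sptep, spte);

		/* parent_pte handling, moved here from kvm_mmu_get_page(). */
		mmu_page_add_parent_pte(vcpu, sp, sptep);

		/*
		 * Only the newly added parent needs mark_unsync(); the
		 * other entries on sp->parent_ptes predate this fault.
		 */
		if (sp->unsync_children || sp->unsync)
			mark_unsync(sptep);
	}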

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya...@lab.ntt.co.jp>
Cc: Xiao Guangrong <guangrong.x...@linux.intel.com>
---
 arch/x86/kvm/mmu.c         | 23 +++++++++--------------
 arch/x86/kvm/paging_tmpl.h |  6 ++----
 2 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7f46e3e..ec61b22 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2119,12 +2119,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
                        break;
 
-               mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-               if (sp->unsync_children) {
+               if (sp->unsync_children)
                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-                       kvm_mmu_mark_parents_unsync(sp);
-               } else if (sp->unsync)
-                       kvm_mmu_mark_parents_unsync(sp);
 
                __clear_sp_write_flooding_count(sp);
                trace_kvm_mmu_get_page(sp, false);
@@ -2135,8 +2131,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
        sp = kvm_mmu_alloc_page(vcpu, direct);
 
-       mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link,
@@ -2204,7 +2198,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
        return __shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+                            struct kvm_mmu_page *sp)
 {
        u64 spte;
 
@@ -2215,6 +2210,11 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
               shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
 
        mmu_spte_set(sptep, spte);
+
+       mmu_page_add_parent_pte(vcpu, sp, sptep);
+
+       if (sp->unsync_children || sp->unsync)
+               mark_unsync(sptep);
 }
 
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2273,11 +2273,6 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                mmu_page_zap_pte(kvm, sp, sp->spt + i);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
-{
-       mmu_page_remove_parent_pte(sp, parent_pte);
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        u64 *sptep;
@@ -2743,7 +2738,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
                                              iterator.level - 1,
                                              1, ACC_ALL, iterator.sptep);
 
-                       link_shadow_page(iterator.sptep, sp);
+                       link_shadow_page(vcpu, iterator.sptep, sp);
                }
        }
        return emulate;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 11650ea..0dcf9c8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -598,7 +598,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        goto out_gpte_changed;
 
                if (sp)
-                       link_shadow_page(it.sptep, sp);
+                       link_shadow_page(vcpu, it.sptep, sp);
        }
 
        for (;
@@ -618,7 +618,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
                                      true, direct_access, it.sptep);
-               link_shadow_page(it.sptep, sp);
+               link_shadow_page(vcpu, it.sptep, sp);
        }
 
        clear_sp_write_flooding_count(it.sptep);
@@ -629,8 +629,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        return emulate;
 
 out_gpte_changed:
-       if (sp)
-               kvm_mmu_put_page(sp, it.sptep);
        kvm_release_pfn_clean(pfn);
        return 0;
 }
-- 
2.1.0
