Use the range-list flush function in mmu_sync_children(),
kvm_mmu_commit_zap_page() and FNAME(sync_page)() instead of issuing a
full remote TLB flush.
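
All three call sites follow the same pattern: shadow pages whose sptes
can be expressed as a gfn range are queued on a local list and flushed
with a single request, with a full remote TLB flush as the fallback. A
rough sketch of that pattern is below; kvm_mmu_queue_flush_request() is
added by this patch, kvm_available_flush_tlb_with_range() and
kvm_flush_remote_tlbs_with_list() come from earlier patches in this
series, and pages_to_flush stands in for whichever shadow page list the
caller walks:

	struct kvm_mmu_page *sp;
	LIST_HEAD(flush_list);

	list_for_each_entry(sp, &pages_to_flush, link)
		/* only last-level sptes can be turned into a gfn range */
		kvm_mmu_queue_flush_request(sp, &flush_list);

	if (kvm_available_flush_tlb_with_range() && !list_empty(&flush_list))
		kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
	else
		kvm_flush_remote_tlbs(kvm);

Queuing only last-level sptes keeps the flush list limited to entries
that can be translated into address ranges.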

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
 arch/x86/kvm/mmu.c         | 26 +++++++++++++++++++++++---
 arch/x86/kvm/paging_tmpl.h |  5 ++++-
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 73e19ce589e7..a071da797a15 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1092,6 +1092,13 @@ static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
        }
 }
 
+static void kvm_mmu_queue_flush_request(struct kvm_mmu_page *sp,
+               struct list_head *flush_list)
+{
+       if (sp->sptep && is_last_spte(*sp->sptep, sp->role.level))
+               list_add(&sp->flush_link, flush_list);
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
 {
        update_gfn_disallow_lpage_count(slot, gfn, 1);
@@ -2373,12 +2380,16 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
        while (mmu_unsync_walk(parent, &pages)) {
                bool protected = false;
+               LIST_HEAD(flush_list);
 
-               for_each_sp(pages, sp, parents, i)
+               for_each_sp(pages, sp, parents, i) {
                        protected |= rmap_write_protect(vcpu, sp->gfn);
+                       kvm_mmu_queue_flush_request(sp, &flush_list);
+               }
 
                if (protected) {
-                       kvm_flush_remote_tlbs(vcpu->kvm);
+                       kvm_flush_remote_tlbs_with_list(vcpu->kvm,
+                                       &flush_list);
                        flush = false;
                }
 
@@ -2715,6 +2726,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
 {
        struct kvm_mmu_page *sp, *nsp;
+       LIST_HEAD(flush_list);
 
        if (list_empty(invalid_list))
                return;
@@ -2728,7 +2740,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
         * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
         * guest mode and/or lockless shadow page table walks.
         */
-       kvm_flush_remote_tlbs(kvm);
+       if (kvm_available_flush_tlb_with_range()) {
+               list_for_each_entry(sp, invalid_list, link)
+                       kvm_mmu_queue_flush_request(sp, &flush_list);
+
+               if (!list_empty(&flush_list))
+                       kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+       } else {
+               kvm_flush_remote_tlbs(kvm);
+       }
 
        list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                WARN_ON(!sp->role.invalid || sp->root_count);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bb8c2cdf70c3..aa450e0596a4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -976,6 +976,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        bool host_writable;
        gpa_t first_pte_gpa;
        int set_spte_ret = 0;
+       LIST_HEAD(flush_list);
 
        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);
@@ -1036,10 +1037,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                                         pte_access, PT_PAGE_TABLE_LEVEL,
                                         gfn, spte_to_pfn(sp->spt[i]),
                                         true, false, host_writable);
+               if (set_spte_ret && kvm_available_flush_tlb_with_range())
+                       kvm_mmu_queue_flush_request(sp, &flush_list);
        }
 
        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_flush_remote_tlbs_with_list(vcpu->kvm, &flush_list);
 
        return nr_present;
 }
-- 
2.14.4