Originally, the TLB flush is done by slot_handle_level_range(). This patch
flushes the TLB directly in kvm_zap_gfn_range() when range-based
flush is available.

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
 arch/x86/kvm/mmu.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 877edae0401f..f24101ef763e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5578,6 +5578,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
+       bool flush = false;
        int i;
 
        spin_lock(&kvm->mmu_lock);
@@ -5585,18 +5586,27 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                slots = __kvm_memslots(kvm, i);
                kvm_for_each_memslot(memslot, slots) {
                        gfn_t start, end;
+                       bool flush_tlb = true;
 
                        start = max(gfn_start, memslot->base_gfn);
                        end = min(gfn_end, memslot->base_gfn + memslot->npages);
                        if (start >= end)
                                continue;
 
-                       slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-                                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-                                               start, end - 1, true);
+                       if (kvm_available_flush_tlb_with_range())
+                               flush_tlb = false;
+
+                       flush |= slot_handle_level_range(kvm, memslot,
+                                       kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+                                       PT_MAX_HUGEPAGE_LEVEL, start,
+                                       end - 1, flush_tlb);
                }
        }
 
+       if (flush && kvm_available_flush_tlb_with_range())
+               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                               gfn_end - gfn_start);
+
        spin_unlock(&kvm->mmu_lock);
 }
 
-- 
2.14.4

Reply via email to