From: Lan Tianyu <tianyu....@microsoft.com>

Originally, the TLB flush is done by slot_handle_level_range(). This patch
flushes the TLB directly in kvm_zap_gfn_range() when range-based
flush is available.

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
Changes since v4:
        Moved the setting of flush_tlb out of the for loop.
---
 arch/x86/kvm/mmu.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 64bd848f021e..94b0540b1471 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5624,8 +5624,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
+       bool flush_tlb = true;
+       bool flush = false;
        int i;
 
+       if (kvm_available_flush_tlb_with_range())
+               flush_tlb = false;
+
        spin_lock(&kvm->mmu_lock);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                slots = __kvm_memslots(kvm, i);
@@ -5637,12 +5642,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                        if (start >= end)
                                continue;
 
-                       slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-                                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
-                                               start, end - 1, true);
+                       flush |= slot_handle_level_range(kvm, memslot,
+                                       kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+                                       PT_MAX_HUGEPAGE_LEVEL, start,
+                                       end - 1, flush_tlb);
                }
        }
 
+       if (flush)
+               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                               gfn_end - gfn_start + 1);
+
        spin_unlock(&kvm->mmu_lock);
 }
 
-- 
2.14.4

Reply via email to