From: Lan Tianyu <tianyu....@microsoft.com>

This patch introduces a TLB flush with range list interface, using
struct kvm_mmu_page as the list entry, and makes
kvm_mmu_commit_zap_page() use the flush-list function.
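For context, a minimal sketch of the definitions this patch relies on;
struct kvm_tlb_range and the flush_link member come from earlier patches
in the series, and the exact layout shown here is an assumption for
illustration:

	/* Assumed definitions (added earlier in the series); layout is
	 * illustrative, not verbatim.
	 */
	struct kvm_tlb_range {
		u64 start_gfn;			/* first gfn to flush */
		u64 pages;			/* number of pages to flush */
		struct hlist_head *flush_list;	/* kvm_mmu_page list, or NULL */
	};

	struct kvm_mmu_page {
		struct list_head link;		/* active/invalid list linkage */
		struct hlist_node flush_link;	/* linkage on a TLB flush list */
		/* remaining fields elided */
	};

With flush_list set to NULL, callers of the existing address-range path
are unaffected; a non-NULL list lets the backend flush only the mappings
of the zapped shadow pages rather than the whole guest address space.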
Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
 arch/x86/kvm/mmu.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8d43b7c0f56f..7a862c56b954 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -291,6 +291,20 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 
 	range.start_gfn = start_gfn;
 	range.pages = pages;
+	range.flush_list = NULL;
+
+	kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+		struct hlist_head *flush_list)
+{
+	struct kvm_tlb_range range;
+
+	if (hlist_empty(flush_list))
+		return;
+
+	range.flush_list = flush_list;
 
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
@@ -2719,6 +2733,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *sp, *nsp;
+	HLIST_HEAD(flush_list);
 
 	if (list_empty(invalid_list))
 		return;
@@ -2732,7 +2747,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
 	 * guest mode and/or lockless shadow page table walks.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (kvm_available_flush_tlb_with_range()) {
+		list_for_each_entry(sp, invalid_list, link)
+			hlist_add_head(&sp->flush_link, &flush_list);
+
+		kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+	} else {
+		kvm_flush_remote_tlbs(kvm);
+	}
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
-- 
2.14.4
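As an aside, a hypothetical sketch of how a range-flush backend
(presumably the Hyper-V range-flush path elsewhere in this series) might
consume such a list; the function name and flush details below are
assumptions for illustration, not code from the series:

	/* Hypothetical backend: walk flush_list if present, otherwise
	 * fall back to the plain gfn range. For illustration only.
	 */
	static void example_flush_tlb_with_range(struct kvm *kvm,
			struct kvm_tlb_range *range)
	{
		struct kvm_mmu_page *sp;

		if (range->flush_list) {
			hlist_for_each_entry(sp, range->flush_list, flush_link) {
				/* flush KVM_PAGES_PER_HPAGE(sp->role.level)
				 * pages starting at sp->gfn
				 */
			}
		} else {
			/* flush range->pages pages starting at
			 * range->start_gfn
			 */
		}
	}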