This patch registers the tlb_remote_flush_with_range callback and backs
it with the Hyper-V TLB range flush interface
(hyperv_flush_guest_mapping_range()). When no range is supplied, it
falls back to a full flush via hyperv_flush_guest_mapping().
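
For context, the new hook is expected to be driven from the KVM MMU side
whenever only a GFN range needs flushing. A minimal sketch of such a
caller follows; the helper name kvm_flush_remote_tlbs_with_range and its
fallback behaviour are assumptions based on the rest of this series, not
code added by this patch:

        /* Sketch of the assumed MMU-side caller (not part of this patch). */
        static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
                        struct kvm_tlb_range *range)
        {
                int ret = -ENOTSUPP;

                /* Use the ranged flush when the vendor module registered one. */
                if (range && kvm_x86_ops->tlb_remote_flush_with_range)
                        ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

                /* Fall back to flushing the whole guest TLB on failure. */
                if (ret)
                        kvm_flush_remote_tlbs(kvm);
        }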

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
Changes since v1:
       Pass the flush range via the new Hyper-V TLB flush struct
(hyperv_tlb_range) rather than the KVM TLB flush struct.
---
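Note: struct hyperv_tlb_range used below is introduced by the Hyper-V
side of this series. Its rough shape, inferred from how this patch fills
it in (the authoritative definition lives in the Hyper-V patches of this
series), is:

        /* Shape inferred from usage in this patch; see the Hyper-V
         * patches in this series for the real definition.
         */
        struct hyperv_tlb_range {
                u64 start_gfn;
                u64 pages;
                struct list_head *flush_list;
                int (*parse_flush_list_func)(union hv_gpa_page_range gpa_list[],
                                int offset, struct list_head *flush_list,
                                int (*fill_flush_list)(union hv_gpa_page_range gpa_list[],
                                int offset, u64 start_gfn, u64 pages));
        };
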
 arch/x86/kvm/vmx.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 51 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2869c3e78168..70e1f916bfc9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1555,7 +1555,43 @@ static void check_ept_pointer_match(struct kvm *kvm)
        to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+int kvm_parse_flush_list_func(union hv_gpa_page_range gpa_list[],
+               int offset, struct list_head *flush_list,
+               int (*fill_flush_list)(union hv_gpa_page_range gpa_list[],
+               int offset, u64 start_gfn, u64 pages))
+{
+       struct kvm_mmu_page *sp;
+
+       list_for_each_entry(sp, flush_list,
+                       flush_link) {
+               offset = fill_flush_list(gpa_list, offset,
+                               sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+       }
+
+       return offset;
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+               struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+       u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+       struct hyperv_tlb_range flush_range;
+
+       if (range) {
+               flush_range.start_gfn = range->start_gfn;
+               flush_range.pages = range->pages;
+               flush_range.flush_list = range->flush_list;
+               flush_range.parse_flush_list_func = kvm_parse_flush_list_func;
+
+               return hyperv_flush_guest_mapping_range(ept_pointer,
+                               &flush_range);
+       } else {
+               return hyperv_flush_guest_mapping(ept_pointer);
+       }
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+               struct kvm_tlb_range *range)
 {
        struct kvm_vcpu *vcpu;
        int ret = -ENOTSUPP, i;
@@ -1567,16 +1603,21 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
 
        if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
                kvm_for_each_vcpu(i, vcpu, kvm)
-                       ret |= hyperv_flush_guest_mapping(
-                               to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer);
+                       ret |= __hv_remote_flush_tlb_with_range(
+                                       kvm, vcpu, range);
        } else {
-               ret = hyperv_flush_guest_mapping(
-                               to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+               ret = __hv_remote_flush_tlb_with_range(kvm,
+                               kvm_get_vcpu(kvm, 0), range);
        }
 
        spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
        return ret;
 }
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+       return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
 static inline void evmcs_write64(unsigned long field, u64 value) {}
 static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -7918,8 +7959,11 @@ static __init int hardware_setup(void)
 
 #if IS_ENABLED(CONFIG_HYPERV)
        if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
-           && enable_ept)
-               kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+           && enable_ept) {
+               kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+               kvm_x86_ops->tlb_remote_flush_with_range =
+                               hv_remote_flush_tlb_with_range;
+       }
 #endif
 
        if (!cpu_has_vmx_ple()) {
-- 
2.14.4
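
For completeness: the fill_flush_list callback handed to
kvm_parse_flush_list_func() is supplied by
hyperv_flush_guest_mapping_range() on the Hyper-V side. A sketch of what
such a callback could look like is below; the function name fill_gpa_list
and the HV_MAX_FLUSH_REP_COUNT capacity check are assumptions, only the
hv_gpa_page_range bitfields come from the Hyper-V TLFS definitions:

        /* Sketch only: encode one GFN range as a flush-list entry. */
        static int fill_gpa_list(union hv_gpa_page_range gpa_list[],
                        int offset, u64 start_gfn, u64 pages)
        {
                if (offset >= HV_MAX_FLUSH_REP_COUNT)  /* assumed bound */
                        return -ENOSPC;

                gpa_list[offset].page.largepage = false;
                gpa_list[offset].page.basepfn = start_gfn;
                /* additional_pages counts pages beyond the first one. */
                gpa_list[offset].page.additional_pages = pages - 1;

                return offset + 1;
        }

Since additional_pages is an 11-bit field, one entry covers at most 2048
pages; a real implementation has to split larger ranges across several
entries.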
