VP_INDEX almost always matches the vCPU id and get_vcpu_by_vpidx() is fast,
so use it instead of traversing the full vCPU list every time.

To support the change split off get_vcpu_idx_by_vpidx() from
get_vcpu_by_vpidx().

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/hyperv.c | 78 ++++++++++++++++++++-------------------------------
 1 file changed, 31 insertions(+), 47 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c5b921c0a467..8db20c9d4529 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -127,20 +127,31 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
        return 0;
 }
 
-static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
+static u32 get_vcpu_idx_by_vpidx(struct kvm *kvm, u32 vpidx)
 {
        struct kvm_vcpu *vcpu = NULL;
        int i;
 
        if (vpidx >= KVM_MAX_VCPUS)
-               return NULL;
+               return U32_MAX;
 
        vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
-               return vcpu;
+               return vpidx;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
-                       return vcpu;
+                       return i;
+       return U32_MAX;
+}
+
+static __always_inline struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm,
+                                                         u32 vpidx)
+{
+       u32 vcpu_idx = get_vcpu_idx_by_vpidx(kvm, vpidx);
+
+       if (vcpu_idx < KVM_MAX_VCPUS)
+               return kvm_get_vcpu(kvm, vcpu_idx);
+
        return NULL;
 }
 
@@ -1244,20 +1255,6 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                return kvm_hv_get_msr(vcpu, msr, pdata);
 }
 
-static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
-{
-       int i = 0, j;
-
-       if (!(valid_bank_mask & BIT_ULL(bank_no)))
-               return -1;
-
-       for (j = 0; j < bank_no; j++)
-               if (valid_bank_mask & BIT_ULL(j))
-                       i++;
-
-       return i;
-}
-
 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
                            u16 rep_cnt, bool ex)
 {
@@ -1265,11 +1262,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
        struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
        struct hv_tlb_flush_ex flush_ex;
        struct hv_tlb_flush flush;
-       struct kvm_vcpu *vcpu;
        unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
-       unsigned long valid_bank_mask = 0;
+       unsigned long valid_bank_mask;
        u64 sparse_banks[64];
-       int sparse_banks_len, i;
+       int sparse_banks_len, bank, i;
        bool all_cpus;
 
        if (!ex) {
@@ -1279,6 +1275,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
                trace_kvm_hv_flush_tlb(flush.processor_mask,
                                       flush.address_space, flush.flags);
 
+               valid_bank_mask = BIT_ULL(0);
                sparse_banks[0] = flush.processor_mask;
                all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
        } else {
@@ -1319,38 +1316,25 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
                goto ret_success;
        }
 
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
-               int bank = hv->vp_index / 64, sbank = 0;
+       for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
+                        BITS_PER_LONG) {
 
-               /* Banks >64 can't be represented */
-               if (bank >= 64)
-                       continue;
+               for_each_set_bit(i, (unsigned long *)&sparse_banks[bank],
+                                BITS_PER_LONG) {
+                       u32 vp_index = bank * 64 + i;
+                       u32 vcpu_idx = get_vcpu_idx_by_vpidx(kvm, vp_index);
 
-               /* Non-ex hypercalls can only address first 64 vCPUs */
-               if (!ex && bank)
-                       continue;
+                       /* A non-existent vCPU was specified */
+                       if (vcpu_idx >= KVM_MAX_VCPUS)
+                               return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-               if (ex) {
                        /*
-                        * Check is the bank of this vCPU is in sparse
-                        * set and get the sparse bank number.
+                        * vcpu->arch.cr3 may not be up-to-date for running
+                        * vCPUs so we can't analyze it here, flush TLB
+                        * regardless of the specified address space.
                         */
-                       sbank = get_sparse_bank_no(valid_bank_mask, bank);
-
-                       if (sbank < 0)
-                               continue;
+                       __set_bit(vcpu_idx, vcpu_bitmap);
                }
-
-               if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
-                       continue;
-
-               /*
-                * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
-                * can't analyze it here, flush TLB regardless of the specified
-                * address space.
-                */
-               __set_bit(i, vcpu_bitmap);
        }
 
        kvm_make_vcpus_request_mask(kvm,
-- 
2.14.4

Reply via email to