Use 'NULL' to represent the 'all cpus' case in
kvm_make_vcpus_request_mask() and avoid building a vCPU mask with
all vCPUs set.
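
With a NULL vcpu_bitmap meaning "every vCPU", the per-vCPU check in
kvm_make_vcpus_request_mask() reduces to roughly the following
(illustrative sketch only; the actual change is in the
virt/kvm/kvm_main.c hunk below):

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* NULL bitmap selects all vCPUs; otherwise honour the mask */
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		kvm_make_request(req, vcpu);
		...
	}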

Suggested-by: Radim Krčmář <rkrc...@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
Reviewed-by: Roman Kagan <rka...@virtuozzo.com>
---
 arch/x86/kvm/hyperv.c | 42 +++++++++++++++++++++++-------------------
 virt/kvm/kvm_main.c   |  6 ++----
 2 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 0cd597b0f754..b45ce136be2f 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1325,35 +1325,39 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 
        cpumask_clear(&hv_current->tlb_lush);
 
+       if (all_cpus) {
+               kvm_make_vcpus_request_mask(kvm,
+                                   KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+                                   NULL, &hv_current->tlb_lush);
+               goto ret_success;
+       }
+
        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
                int bank = hv->vp_index / 64, sbank = 0;
 
-               if (!all_cpus) {
-                       /* Banks >64 can't be represented */
-                       if (bank >= 64)
-                               continue;
-
-                       /* Non-ex hypercalls can only address first 64 vCPUs */
-                       if (!ex && bank)
-                               continue;
+               /* Banks >64 can't be represented */
+               if (bank >= 64)
+                       continue;
 
-                       if (ex) {
-                               /*
-                                * Check is the bank of this vCPU is in sparse
-                                * set and get the sparse bank number.
-                                */
-                               sbank = get_sparse_bank_no(valid_bank_mask,
-                                                          bank);
+               /* Non-ex hypercalls can only address first 64 vCPUs */
+               if (!ex && bank)
+                       continue;
 
-                               if (sbank < 0)
-                                       continue;
-                       }
+               if (ex) {
+                       /*
+                        * Check if the bank of this vCPU is in the sparse
+                        * set and get the sparse bank number.
+                        */
+                       sbank = get_sparse_bank_no(valid_bank_mask, bank);
 
-                       if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
+                       if (sbank < 0)
                                continue;
                }
 
+               if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
+                       continue;
+
                /*
                 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
                 * can't analyze it here, flush TLB regardless of the specified
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f986e31fa68c..587e1a0a8715 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -219,7 +219,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
        me = get_cpu();
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (!test_bit(i, vcpu_bitmap))
+               if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
                        continue;
 
                kvm_make_request(req, vcpu);
@@ -243,12 +243,10 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
        cpumask_var_t cpus;
        bool called;
-       static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
-               = {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-       called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
+       called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
 
        free_cpumask_var(cpus);
        return called;
-- 
2.17.1
