From: Haiwei Li <lihai...@tencent.com>

Check the allocation of the per-cpu __pv_cpu_mask. Set
'send_IPI_mask_allbutself' only when the allocation succeeds, and make
'kvm_flush_tlb_others' fall back to the native flush when __pv_cpu_mask
is unallocated.

Suggested-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
Signed-off-by: Haiwei Li <lihai...@tencent.com>
---
v1 -> v2:
 * add a CONFIG_SMP guard for kvm_send_ipi_mask_allbutself to prevent a
   build error
v2 -> v3:
 * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
v3 -> v4:
 * move kvm_setup_pv_ipi into kvm_alloc_cpumask, leaving kvm_apic_init
   empty
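
Note on the fallback check: 'this_cpu_cpumask_var_ptr' can only return
NULL when CONFIG_CPUMASK_OFFSTACK is set and the earlier allocation
failed; otherwise the mask is embedded in the per-cpu area and
allocation cannot fail. Abridged from <linux/cpumask.h>, for reference:

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* NULL until allocated */
#else
typedef struct cpumask cpumask_var_t[1];	/* allocation cannot fail */
#endif

So the new NULL check in kvm_flush_tlb_others() is effectively free on
non-OFFSTACK configurations.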

 arch/x86/kernel/kvm.c | 53 +++++++++++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 42c6e0deff9e..be28203cc098 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -547,16 +547,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
        __send_ipi_mask(local_mask, vector);
 }
 
-/*
- * Set the IPI entry points
- */
-static void kvm_setup_pv_ipi(void)
-{
-       apic->send_IPI_mask = kvm_send_ipi_mask;
-       apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-       pr_info("setup PV IPIs\n");
-}
-
 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 {
        int cpu;
@@ -619,6 +609,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+       if (unlikely(!flushmask)) {
+               native_flush_tlb_others(cpumask, info);
+               return;
+       }
+
        cpumask_copy(flushmask, cpumask);
        /*
         * We have to call flush only on online vCPUs. And
@@ -732,10 +727,6 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
-       if (pv_ipi_supported())
-               kvm_setup_pv_ipi();
-#endif
 }
 
 static void __init kvm_init_platform(void)
@@ -765,10 +756,18 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static void kvm_free_cpumask(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu)
+               free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
 static __init int kvm_alloc_cpumask(void)
 {
        int cpu;
-       bool alloc = false;
+       bool alloc = false, alloced = true;
 
        if (!kvm_para_available() || nopv)
                return 0;
@@ -783,10 +782,30 @@ static __init int kvm_alloc_cpumask(void)
 
        if (alloc)
                for_each_possible_cpu(cpu) {
-                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-                               GFP_KERNEL, cpu_to_node(cpu));
+                       if (!zalloc_cpumask_var_node(
+                               per_cpu_ptr(&__pv_cpu_mask, cpu),
+                               GFP_KERNEL, cpu_to_node(cpu))) {
+                               alloced = false;
+                               break;
+                       }
                }
 
+#if defined(CONFIG_SMP)
+       /* Set the IPI entry points */
+       if (pv_ipi_supported()) {
+               apic->send_IPI_mask = kvm_send_ipi_mask;
+               if (alloced)
+                       apic->send_IPI_mask_allbutself =
+                               kvm_send_ipi_mask_allbutself;
+               pr_info("setup PV IPIs\n");
+       }
+#endif
+
+       if (!alloced) {
+               kvm_free_cpumask();
+               return -ENOMEM;
+       }
+
        return 0;
 }
 arch_initcall(kvm_alloc_cpumask);
-- 
2.18.4
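
For completeness, the unwind in kvm_free_cpumask() is safe even when
the allocation loop broke early: free_cpumask_var() reduces to kfree()
under CONFIG_CPUMASK_OFFSTACK, and kfree(NULL) is a no-op (without
OFFSTACK it is a no-op entirely). A minimal sketch of the same
alloc-or-unwind pattern, with hypothetical names:

static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static int __init example_alloc(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var_node(per_cpu_ptr(&example_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu)))
			goto unwind;
	}
	return 0;

unwind:
	/* Freeing never-allocated (NULL) masks is harmless. */
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(example_mask, cpu));
	return -ENOMEM;
}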
