lihaiwei.ker...@gmail.com writes:

> From: Haiwei Li <lihai...@tencent.com>
>
> Check the allocation of the per-cpu __pv_cpu_mask.
>
> Suggested-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
> Signed-off-by: Haiwei Li <lihai...@tencent.com>
> ---
> v1 -> v2:
>  * add a CONFIG_SMP guard for kvm_send_ipi_mask_allbutself to prevent a build error
> v2 -> v3:
>  * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
>
>  arch/x86/kernel/kvm.c | 27 ++++++++++++++++++++++++---
>  1 file changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 9663ba31347c..1e5da6db519c 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -553,7 +553,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
>  static void kvm_setup_pv_ipi(void)
>  {
>       apic->send_IPI_mask = kvm_send_ipi_mask;
> -     apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;

I see that kvm_send_ipi_mask_allbutself() uses the per-CPU __pv_cpu_mask
while kvm_send_ipi_mask() doesn't, but assigning send_IPI_mask here and
send_IPI_mask_allbutself in kvm_alloc_cpumask() looks weird. Personally,
I'd prefer to move the apic->send_IPI_mask assignment to
kvm_alloc_cpumask() too (probably by calling kvm_setup_pv_ipi() from
there and getting rid of kvm_apic_init() completely), along the lines of
the sketch below.
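
Completely untested sketch; it assumes kvm_apic_init() does nothing
besides the KVM_FEATURE_PV_SEND_IPI check, so that check simply moves
into kvm_alloc_cpumask():

static void kvm_setup_pv_ipi(void)
{
        apic->send_IPI_mask = kvm_send_ipi_mask;
        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
        pr_info("setup PV IPIs\n");
}

static __init int kvm_alloc_cpumask(void)
{
        int cpu;
        bool alloc = false;

        /* ... the existing checks that decide 'alloc' stay as-is ... */

        if (alloc)
                for_each_possible_cpu(cpu) {
                        if (!zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
                                                     GFP_KERNEL, cpu_to_node(cpu)))
                                goto zalloc_cpumask_fail;
                }

#if defined(CONFIG_SMP)
        /* PV IPIs are only enabled once all masks were allocated. */
        if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
                kvm_setup_pv_ipi();
#endif
        return 0;

zalloc_cpumask_fail:
        kvm_free_cpumask();
        return -ENOMEM;
}

That way both function pointers are only overridden after the
allocation has actually succeeded.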

Alternatively, we can save the original apic->send_IPI_mask_allbutself
value to a variable and call it from kvm_send_ipi_mask_allbutself() when
__pv_cpu_mask wasn't allocated, e.g.:
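
Again untested; 'orig_send_IPI_mask_allbutself' is a made-up name, and
the rest of the body mirrors the current kvm_send_ipi_mask_allbutself():

static void (*orig_send_IPI_mask_allbutself)(const struct cpumask *mask,
                                             int vector);

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

        /* No mask allocated: fall back to the saved callback. */
        if (unlikely(!new_mask)) {
                orig_send_IPI_mask_allbutself(mask, vector);
                return;
        }

        cpumask_copy(new_mask, mask);
        cpumask_clear_cpu(this_cpu, new_mask);
        __send_ipi_mask(new_mask, vector);
}

with kvm_setup_pv_ipi() saving the previous value before overriding it:

        orig_send_IPI_mask_allbutself = apic->send_IPI_mask_allbutself;
        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;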

>       pr_info("setup PV IPIs\n");
>  }
>  
> @@ -619,6 +618,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
>       struct kvm_steal_time *src;
>       struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
>  
> +     if (unlikely(!flushmask)) {
> +             native_flush_tlb_others(cpumask, info);
> +             return;
> +     }
> +
>       cpumask_copy(flushmask, cpumask);
>       /*
>        * We have to call flush only on online vCPUs. And
> @@ -765,6 +769,14 @@ static __init int activate_jump_labels(void)
>  }
>  arch_initcall(activate_jump_labels);
>  
> +static void kvm_free_cpumask(void)
> +{
> +     unsigned int cpu;
> +
> +     for_each_possible_cpu(cpu)
> +             free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
> +}
> +
>  static __init int kvm_alloc_cpumask(void)
>  {
>       int cpu;
> @@ -783,11 +795,20 @@ static __init int kvm_alloc_cpumask(void)
>  
>       if (alloc)
>               for_each_possible_cpu(cpu) {
> -                     zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
> -                             GFP_KERNEL, cpu_to_node(cpu));
> +                     if (!zalloc_cpumask_var_node(
> +                             per_cpu_ptr(&__pv_cpu_mask, cpu),
> +                             GFP_KERNEL, cpu_to_node(cpu)))
> +                             goto zalloc_cpumask_fail;
>               }
>  
> +#if defined(CONFIG_SMP)
> +     apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> +#endif
>       return 0;
> +
> +zalloc_cpumask_fail:
> +     kvm_free_cpumask();
> +     return -ENOMEM;
>  }
>  arch_initcall(kvm_alloc_cpumask);

-- 
Vitaly
