Wanpeng Li <[email protected]> writes:

> From: Wanpeng Li <[email protected]>
>
> Implement PV IPIs in guest kernel.
>
> Cc: Paolo Bonzini <[email protected]>
> Cc: Radim Krčmář <[email protected]>
> Cc: Vitaly Kuznetsov <[email protected]>
> Signed-off-by: Wanpeng Li <[email protected]>
> ---
>  arch/x86/kernel/kvm.c         | 99 
> +++++++++++++++++++++++++++++++++++++++++++
>  include/uapi/linux/kvm_para.h |  1 +
>  2 files changed, 100 insertions(+)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 5b2300b..7e3ee25 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -47,6 +47,7 @@
>  #include <asm/hypervisor.h>
>  #include <asm/kvm_guest.h>
>
> +static struct apic orig_apic;
>  static int kvmapf = 1;
>
>  static int __init parse_no_kvmapf(char *arg)
> @@ -454,6 +455,89 @@ static void __init sev_map_percpu_data(void)
>  }
>
>  #ifdef CONFIG_SMP
> +
> +#ifdef CONFIG_X86_64
> +static bool __send_ipi_mask(const struct cpumask *mask, int vector)
> +{
> +     unsigned long flags, ipi_bitmap_low = 0, ipi_bitmap_high = 0, icr = 0;
> +     int cpu, apic_id, ret = 1;
> +
> +     if (cpumask_empty(mask))
> +             return true;
> +
> +     local_irq_save(flags);
> +
> +     for_each_cpu(cpu, mask) {
> +             apic_id = per_cpu(x86_cpu_to_apicid, cpu);
> +             if (apic_id < BITS_PER_LONG)
> +                     __set_bit(apic_id, &ipi_bitmap_low);
> +             else if (apic_id < 2 * BITS_PER_LONG)
> +                     __set_bit(apic_id - BITS_PER_LONG, &ipi_bitmap_high);
> +             else
> +                     goto ipi_mask_done;

Nit:

Both the fact that we don't set 'ret' here and the fact that the label
is named 'ipi_mask_done' -- which sounds like 'all OK' at least to me --
contribute to the feeling that we just skip sending IPIs in some cases.

I would prefer to see something like

else {
   ret = -EFAULT;
   goto irq_restore_exit;
}

> +     }
> +
> +     switch (vector) {
> +     default:
> +             icr = APIC_DM_FIXED | vector;
> +             break;
> +     case NMI_VECTOR:
> +             icr = APIC_DM_NMI;
> +             break;
> +     }
> +
> +     ret = kvm_hypercall3(KVM_HC_SEND_IPI, ipi_bitmap_low, ipi_bitmap_high, 
> icr);
> +
> +ipi_mask_done:
> +     local_irq_restore(flags);
> +     return ((ret == 0) ? true : false);

... and then why does this function need to return 'bool' in the first
place? Let's just make it return 'int'.

> +}
> +
> +static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
> +{
> +     if (!__send_ipi_mask(mask, vector))
> +             orig_apic.send_IPI_mask(mask, vector);
> +}
> +
> +static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int 
> vector)
> +{
> +     unsigned int this_cpu = smp_processor_id();
> +     struct cpumask new_mask;
> +     const struct cpumask *local_mask;
> +
> +     cpumask_copy(&new_mask, mask);
> +     cpumask_clear_cpu(this_cpu, &new_mask);
> +     local_mask = &new_mask;
> +     if (!__send_ipi_mask(local_mask, vector))
> +             orig_apic.send_IPI_mask_allbutself(mask, vector);
> +}
> +
> +static void kvm_send_ipi_allbutself(int vector)
> +{
> +     kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
> +}
> +
> +static void kvm_send_ipi_all(int vector)
> +{
> +     if (!__send_ipi_mask(cpu_online_mask, vector))
> +             orig_apic.send_IPI_all(vector);
> +}
> +
> +/*
> + * Set the IPI entry points
> + */
> +static void kvm_setup_pv_ipi(void)
> +{
> +     orig_apic = *apic;
> +
> +     apic->send_IPI_mask = kvm_send_ipi_mask;
> +     apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> +     apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
> +     apic->send_IPI_all = kvm_send_ipi_all;
> +     pr_info("KVM setup pv IPIs\n");
> +}
> +#endif
> +
>  static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
>  {
>       native_smp_prepare_cpus(max_cpus);
> @@ -624,12 +708,27 @@ static uint32_t __init kvm_detect(void)
>       return kvm_cpuid_base();
>  }
>
> +static void __init kvm_apic_init(void)
> +{
> +#if defined(CONFIG_SMP) && defined(CONFIG_X86_64)
> +     if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
> +             num_possible_cpus() <= 2 * BITS_PER_LONG)
> +             kvm_setup_pv_ipi();
> +#endif
> +}
> +
> +static void __init kvm_init_platform(void)
> +{
> +     x86_platform.apic_post_init = kvm_apic_init;
> +}
> +
>  const __initconst struct hypervisor_x86 x86_hyper_kvm = {
>       .name                   = "KVM",
>       .detect                 = kvm_detect,
>       .type                   = X86_HYPER_KVM,
>       .init.guest_late_init   = kvm_guest_init,
>       .init.x2apic_available  = kvm_para_available,
> +     .init.init_platform     = kvm_init_platform,
>  };
>
>  static __init int activate_jump_labels(void)
> diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
> index dcf629d..7395f38 100644
> --- a/include/uapi/linux/kvm_para.h
> +++ b/include/uapi/linux/kvm_para.h
> @@ -26,6 +26,7 @@
>  #define KVM_HC_MIPS_EXIT_VM          7
>  #define KVM_HC_MIPS_CONSOLE_OUTPUT   8
>  #define KVM_HC_CLOCK_PAIRING         9
> +#define KVM_HC_SEND_IPI                      10
>
>  /*
>   * hypercalls use architecture specific

-- 
  Vitaly

Reply via email to