On 03/02/2016 17:23, Radim Krčmář wrote:
> Following patches would be even uglier if inject_lock didn't go away.
> 
> Patch changes the virtual wire comment to better describe our situation.
> 
> Signed-off-by: Radim Krčmář <[email protected]>
> ---
>  arch/x86/kvm/i8254.c | 67 ++++++++++++++++++++++------------------------------
>  arch/x86/kvm/i8254.h |  3 +--
>  2 files changed, 29 insertions(+), 41 deletions(-)
> 
> diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
> index de5f5018026f..a137eb381012 100644
> --- a/arch/x86/kvm/i8254.c
> +++ b/arch/x86/kvm/i8254.c
> @@ -236,16 +236,13 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
>  {
>       struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
>                                                irq_ack_notifier);
> -     int value;
>  
> -     spin_lock(&ps->inject_lock);
> +     atomic_set(&ps->irq_ack, 1);

smp_mb__before_atomic();

>       if (atomic_add_unless(&ps->pending, -1, 0))
>               /* in this case, we had multiple outstanding pit interrupts
>                * that we needed to inject.  Reinject
>                */
>               queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
> -     ps->irq_ack = 1;
> -     spin_unlock(&ps->inject_lock);
>  }
>  
>  void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
> @@ -276,34 +273,25 @@ static void pit_do_work(struct kthread_work *work)
>       struct kvm_vcpu *vcpu;
>       int i;
>       struct kvm_kpit_state *ps = &pit->pit_state;
> -     int inject = 0;
>  
> -     /* Try to inject pending interrupts when
> -      * last one has been acked.
> +     if (!atomic_xchg(&ps->irq_ack, 0))
> +             return;
> +
> +     kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
> +     kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
> +
> +     /*
> +      * Provides NMI watchdog support via Virtual Wire mode.
> +      * The route is: PIT -> LVT0 in NMI mode.
> +      *
> +      * Note: Our Virtual Wire implementation does not follow
> +      * the MP specification.  We propagate a PIT interrupt to all
> +      * VCPUs and only when LVT0 is in NMI mode.  The interrupt can
> +      * also be simultaneously delivered through PIC and IOAPIC.
>        */
> -     spin_lock(&ps->inject_lock);
> -     if (ps->irq_ack) {
> -             ps->irq_ack = 0;
> -             inject = 1;
> -     }
> -     spin_unlock(&ps->inject_lock);
> -     if (inject) {
> -             kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
> -             kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
> -
> -             /*
> -              * Provides NMI watchdog support via Virtual Wire mode.
> -              * The route is: PIT -> PIC -> LVT0 in NMI mode.
> -              *
> -              * Note: Our Virtual Wire implementation is simplified, only
> -              * propagating PIT interrupts to all VCPUs when they have set
> -              * LVT0 to NMI delivery. Other PIC interrupts are just sent to
> -              * VCPU0, and only if its LVT0 is in EXTINT mode.
> -              */
> -             if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
> -                     kvm_for_each_vcpu(i, vcpu, kvm)
> -                             kvm_apic_nmi_wd_deliver(vcpu);
> -     }
> +     if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
> +             kvm_for_each_vcpu(i, vcpu, kvm)
> +                     kvm_apic_nmi_wd_deliver(vcpu);
>  }
>  
>  static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
> @@ -323,6 +311,12 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
>               return HRTIMER_NORESTART;
>  }
>  
> +static void kvm_pit_reset_reinject(struct kvm_pit *pit)
> +{
> +     atomic_set(&pit->pit_state.pending, 0);

smp_wmb()?

Looks safe otherwise.  (Please also add a comment before the memory
barriers to show the pairing).

Paolo

Reply via email to