On Tue, Nov 02, 2010 at 04:49:18PM +0100, Jan Kiszka wrote:
> This improves the IRQ forwarding for assigned devices: By using the
> kernel's threaded IRQ scheme, we can get rid of the latency-prone work
> queue and simplify the code in the same run.
Interesting. Do you see any latency improvements from this?
> Moreover, we no longer have to hold assigned_dev_lock while raising the
> guest IRQ, which can be a lengthy operation as we may have to iterate
> over all VCPUs. The lock is now only used for synchronizing masking vs.
> unmasking of INTx-type IRQs, thus it is renamed to intx_lock.
>
> Signed-off-by: Jan Kiszka
> ---
> include/linux/kvm_host.h | 12 +
> virt/kvm/assigned-dev.c | 106 ++---
> 2 files changed, 35 insertions(+), 83 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 462b982..a786419 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -456,16 +456,8 @@ struct kvm_irq_ack_notifier {
> void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
> };
>
> -#define KVM_ASSIGNED_MSIX_PENDING0x1
> -struct kvm_guest_msix_entry {
> - u32 vector;
> - u16 entry;
> - u16 flags;
> -};
> -
> struct kvm_assigned_dev_kernel {
> struct kvm_irq_ack_notifier ack_notifier;
> - struct work_struct interrupt_work;
> struct list_head list;
> int assigned_dev_id;
> int host_segnr;
> @@ -476,13 +468,13 @@ struct kvm_assigned_dev_kernel {
> bool host_irq_disabled;
> struct msix_entry *host_msix_entries;
> int guest_irq;
> - struct kvm_guest_msix_entry *guest_msix_entries;
> + struct msix_entry *guest_msix_entries;
> unsigned long irq_requested_type;
> int irq_source_id;
> int flags;
> struct pci_dev *dev;
> struct kvm *kvm;
> - spinlock_t assigned_dev_lock;
> + spinlock_t intx_lock;
> };
>
> struct kvm_irq_mask_notifier {
> diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
> index ecc4419..d0b07ea 100644
> --- a/virt/kvm/assigned-dev.c
> +++ b/virt/kvm/assigned-dev.c
> @@ -55,58 +55,31 @@ static int find_index_from_host_irq(struct
> kvm_assigned_dev_kernel
> return index;
> }
>
> -static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
> +static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
> {
> - struct kvm_assigned_dev_kernel *assigned_dev;
> - int i;
> + struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
> + u32 vector;
> + int index;
>
> - assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
> - interrupt_work);
> + if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
> + spin_lock(&assigned_dev->intx_lock);
> + disable_irq_nosync(irq);
> + assigned_dev->host_irq_disabled = true;
> + spin_unlock(&assigned_dev->intx_lock);
> + }
>
> - spin_lock_irq(&assigned_dev->assigned_dev_lock);
> if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
> - struct kvm_guest_msix_entry *guest_entries =
> - assigned_dev->guest_msix_entries;
> - for (i = 0; i < assigned_dev->entries_nr; i++) {
> - if (!(guest_entries[i].flags &
> - KVM_ASSIGNED_MSIX_PENDING))
> - continue;
> - guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
> + index = find_index_from_host_irq(assigned_dev, irq);
> + if (index >= 0) {
> + vector = assigned_dev->
> + guest_msix_entries[index].vector;
> kvm_set_irq(assigned_dev->kvm,
> - assigned_dev->irq_source_id,
> - guest_entries[i].vector, 1);
> + assigned_dev->irq_source_id, vector, 1);
> }
> } else
> kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
> assigned_dev->guest_irq, 1);
>
> - spin_unlock_irq(&assigned_dev->assigned_dev_lock);
> -}
> -
> -static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
> -{
> - unsigned long flags;
> - struct kvm_assigned_dev_kernel *assigned_dev =
> - (struct kvm_assigned_dev_kernel *) dev_id;
> -
> - spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
> - if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
> - int index = find_index_from_host_irq(assigned_dev, irq);
> - if (index < 0)
> - goto out;
> - assigned_dev->guest_msix_entries[index].flags |=
> - KVM_ASSIGNED_MSIX_PENDING;
> - }
> -
> - schedule_work(&assigned_dev->interrupt_work);
> -
> - if (assigne