Per Avi's suggestion, make a single kvm_set_irq() able to handle all
interrupt injection, including MSI. This patch implements that.

struct kvm_gsi_route_entry maps a special gsi (one with KVM_GSI_ROUTE_MASK
set) to an MSI/MSI-X message address/data pair. The struct can also be
extended for other purposes later.

Up to 256 gsi_route_entry mappings are supported. The gsi is allocated by
the kernel, and two new ioctls are provided for userspace, which is more
flexible.
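
For illustration, kvm_set_irq() could consume the routing table along these
lines (a sketch only, not part of this patch; kvm_deliver_msi() is a
hypothetical helper standing in for whatever MSI delivery code follows):

    void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
    {
            struct kvm_gsi_route_entry *gsi_entry;

            if (irq & KVM_GSI_ROUTE_MASK) {
                    /* kvm_find_gsi_route_entry() expects kvm->lock held */
                    mutex_lock(&kvm->lock);
                    gsi_entry = kvm_find_gsi_route_entry(kvm, irq);
                    if (gsi_entry && gsi_entry->type == KVM_GSI_ROUTE_MSI &&
                        level)
                            kvm_deliver_msi(kvm, &gsi_entry->msi);
                    mutex_unlock(&kvm->lock);
                    return;
            }

            /* non-routed gsi: existing PIC/IOAPIC pin handling goes here */
    }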

Signed-off-by: Sheng Yang <sh...@linux.intel.com>
---
 include/linux/kvm.h      |   26 +++++++++++
 include/linux/kvm_host.h |   20 +++++++++
 virt/kvm/irq_comm.c      |   72 ++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  105 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 223 insertions(+), 0 deletions(-)
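
From userspace, the two ioctls would be used roughly as below (illustrative
only: vm_fd is an open VM fd, the MSI address/data values are placeholders,
and error handling is trimmed):

    struct kvm_gsi_route_entry_guest entry;
    struct kvm_gsi_route_guest route;

    memset(&entry, 0, sizeof(entry));
    entry.type = KVM_GSI_ROUTE_MSI;
    entry.msi.addr_lo = 0xfee00000;         /* example MSI address */
    entry.msi.addr_hi = 0;
    entry.msi.data = 0x4041;                /* example MSI data */

    route.entries_nr = 1;
    route.entries = &entry;

    /* the kernel allocates a gsi (with KVM_GSI_ROUTE_MASK set) and
     * writes it back into entry.gsi */
    if (ioctl(vm_fd, KVM_REQUEST_GSI_ROUTE, &route) < 0)
            perror("KVM_REQUEST_GSI_ROUTE");

    /* ... entry.gsi can now be used for injection ... */

    if (ioctl(vm_fd, KVM_FREE_GSI_ROUTE, &route) < 0)
            perror("KVM_FREE_GSI_ROUTE");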

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 71c150f..bbefce6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -399,6 +399,9 @@ struct kvm_trace_rec {
 #if defined(CONFIG_X86)
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
+#if defined(CONFIG_X86)
+#define KVM_CAP_GSI_ROUTE 25
+#endif
 
 /*
  * ioctls for VM fds
@@ -433,6 +436,8 @@ struct kvm_trace_rec {
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
                            struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
+#define KVM_REQUEST_GSI_ROUTE    _IOWR(KVMIO, 0x72, void *)
+#define KVM_FREE_GSI_ROUTE       _IOW(KVMIO, 0x73, void *)
 
 /*
  * ioctls for vcpu fds
@@ -553,4 +558,25 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION  KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI  (1 << 0)
 
+struct kvm_gsi_route_guest {
+       __u32 entries_nr;
+       struct kvm_gsi_route_entry_guest *entries;
+};
+
+#define KVM_GSI_ROUTE_MSI      (1 << 0)
+struct kvm_gsi_route_entry_guest {
+       __u32 gsi;
+       __u32 type;
+       __u32 flags;
+       __u32 reserved;
+       union {
+               struct {
+                       __u32 addr_lo;
+                       __u32 addr_hi;
+                       __u32 data;
+               } msi;
+               __u32 padding[8];
+       };
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcad0..6a00201 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,6 +136,9 @@ struct kvm {
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
 #endif
+       struct hlist_head gsi_route_list;
+#define KVM_NR_GSI_ROUTE_ENTRIES    256
+       DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
 };
 
 /* The guest did something we don't support. */
@@ -336,6 +339,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+#define KVM_GSI_ROUTE_MASK    0x1000000ull
+struct kvm_gsi_route_entry {
+       u32 gsi;
+       u32 type;
+       u32 flags;
+       u32 reserved;
+       union {
+               struct msi_msg msi;
+               u32 padding[8];
+       };
+       struct hlist_node link;
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -343,6 +359,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi);
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry);
+void kvm_free_gsi_route_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5162a41..64ca4cf 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -123,3 +123,75 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
                        kimn->func(kimn, mask);
 }
 
+int kvm_update_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+       struct kvm_gsi_route_entry *found_entry, *new_entry;
+       int r, gsi;
+
+       mutex_lock(&kvm->lock);
+       /* Find whether we need an update or a new entry */
+       found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
+       if (found_entry) {
+               /* update in place; don't clobber the list linkage */
+               memcpy(found_entry, entry,
+                      offsetof(struct kvm_gsi_route_entry, link));
+       } else {
+               gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
+                                         KVM_NR_GSI_ROUTE_ENTRIES);
+               if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
+                       r = -ENOSPC;
+                       goto out;
+               }
+               new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+               if (!new_entry) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               entry->gsi = gsi | KVM_GSI_ROUTE_MASK;
+               *new_entry = *entry;
+               __set_bit(gsi, kvm->gsi_route_bitmap);
+               hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
+       }
+       r = 0;
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
+/* Must be called with kvm->lock held */
+struct kvm_gsi_route_entry *kvm_find_gsi_route_entry(struct kvm *kvm, u32 gsi)
+{
+       struct kvm_gsi_route_entry *entry;
+       struct hlist_node *n;
+
+       if (!(gsi & KVM_GSI_ROUTE_MASK))
+               return NULL;
+       hlist_for_each_entry(entry, n, &kvm->gsi_route_list, link)
+               if (entry->gsi == gsi)
+                       goto out;
+       entry = NULL;
+out:
+       return entry;
+}
+
+/* Must be called with kvm->lock held */
+void kvm_free_gsi_route(struct kvm *kvm, struct kvm_gsi_route_entry *entry)
+{
+       if (!entry)
+               return;
+       __clear_bit(entry->gsi & ~KVM_GSI_ROUTE_MASK, kvm->gsi_route_bitmap);
+       hlist_del(&entry->link);
+       kfree(entry);
+}
+
+void kvm_free_gsi_route_list(struct kvm *kvm)
+{
+       struct kvm_gsi_route_entry *entry;
+       struct hlist_node *pos, *n;
+
+       mutex_lock(&kvm->lock);
+       hlist_for_each_entry_safe(entry, pos, n, &kvm->gsi_route_list, link)
+               kvm_free_gsi_route(kvm, entry);
+       mutex_unlock(&kvm->lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 61688a6..8aa939c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,6 +839,7 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
 #endif
+       INIT_HLIST_HEAD(&kvm->gsi_route_list);
 out:
        return kvm;
 }
@@ -877,6 +878,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        struct mm_struct *mm = kvm->mm;
 
        kvm_arch_sync_events(kvm);
+       kvm_free_gsi_route_list(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
@@ -1605,6 +1607,45 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
        return 0;
 }
 
+static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_guest *gsi_route,
+                       struct kvm_gsi_route_entry_guest *guest_entries)
+{
+       struct kvm_gsi_route_entry entry;
+       int r = 0, i;
+
+       for (i = 0; i < gsi_route->entries_nr; i++) {
+               memcpy(&entry, &guest_entries[i], sizeof(guest_entries[i]));
+               r = kvm_update_gsi_route(kvm, &entry);
+               if (r == 0)
+                       guest_entries[i].gsi = entry.gsi;
+               else
+                       break;
+       }
+       return r;
+}
+
+static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_guest *gsi_route,
+                       struct kvm_gsi_route_entry_guest *guest_entries)
+{
+       struct kvm_gsi_route_entry *entry;
+       int r = 0, i;
+
+       mutex_lock(&kvm->lock);
+       for (i = 0; i < gsi_route->entries_nr; i++) {
+               entry = kvm_find_gsi_route_entry(kvm, guest_entries[i].gsi);
+               if (!entry) {
+                       r = -EINVAL;
+                       goto out;
+               }
+               kvm_free_gsi_route(kvm, entry);
+       }
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -1803,6 +1844,7 @@ static long kvm_vm_ioctl(struct file *filp,
 {
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
+       struct kvm_gsi_route_entry_guest *gsi_entries = NULL;
        int r;
 
        if (kvm->mm != current->mm)
@@ -1887,10 +1929,73 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        }
 #endif
+       case KVM_REQUEST_GSI_ROUTE: {
+               struct kvm_gsi_route_guest gsi_route;
+               r = -EFAULT;
+               if (copy_from_user(&gsi_route, argp, sizeof gsi_route))
+                       goto out;
+               if (gsi_route.entries_nr == 0 ||
+                   gsi_route.entries_nr >= KVM_NR_GSI_ROUTE_ENTRIES) {
+                       r = -EINVAL;
+                       goto out;
+               }
+               gsi_entries = vmalloc(gsi_route.entries_nr *
+                                     sizeof(struct kvm_gsi_route_entry_guest));
+               if (!gsi_entries) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               r = -EFAULT;
+               if (copy_from_user(gsi_entries,
+                                  (void __user *)gsi_route.entries,
+                                  gsi_route.entries_nr *
+                                  sizeof(struct kvm_gsi_route_entry_guest)))
+                       goto out;
+               r = kvm_vm_ioctl_request_gsi_route(kvm, &gsi_route,
+                                                  gsi_entries);
+               if (r)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user((void __user *)gsi_route.entries,
+                               gsi_entries,
+                               gsi_route.entries_nr *
+                               sizeof(struct kvm_gsi_route_entry_guest)))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_FREE_GSI_ROUTE: {
+               struct kvm_gsi_route_guest gsi_route;
+               r = -EFAULT;
+               if (copy_from_user(&gsi_route, argp, sizeof gsi_route))
+                       goto out;
+               if (gsi_route.entries_nr == 0 ||
+                   gsi_route.entries_nr >= KVM_NR_GSI_ROUTE_ENTRIES) {
+                       r = -EINVAL;
+                       goto out;
+               }
+               gsi_entries = vmalloc(gsi_route.entries_nr *
+                                     sizeof(struct kvm_gsi_route_entry_guest));
+               if (!gsi_entries) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               r = -EFAULT;
+               if (copy_from_user(gsi_entries,
+                                  (void __user *)gsi_route.entries,
+                                  gsi_route.entries_nr *
+                                  sizeof(struct kvm_gsi_route_entry_guest)))
+                       goto out;
+               r = kvm_vm_ioctl_free_gsi_route(kvm, &gsi_route, gsi_entries);
+               if (r)
+                       goto out;
+               break;
+       }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
 out:
+       vfree(gsi_entries);
        return r;
 }
 
-- 
1.5.4.5
