Avi's suggestion was to use a single kvm_set_irq() to handle all interrupts, including MSI. So here it is.
struct kvm_gsi_msg maps a special gsi (one with KVM_GSI_MSG_MASK set) to an MSI/MSI-X message address/data pair. Up to 256 gsi_msg mappings are supported for now. The gsi_msg entries are allocated by the kernel, and two new ioctls expose them to userspace, which is more flexible.

Signed-off-by: Sheng Yang <[email protected]>
---
 include/linux/kvm.h      |   12 ++++++++
 include/linux/kvm_host.h |   16 ++++++++++
 virt/kvm/irq_comm.c      |   70 ++++++++++++++++++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |   63 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 161 insertions(+), 0 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 5b965f6..b091a86 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -394,6 +394,7 @@ struct kvm_trace_rec {
 #define KVM_CAP_USER_NMI 22
 #endif
 #define KVM_CAP_SET_GUEST_DEBUG 23
+#define KVM_CAP_GSI_MSG 24
 /*
  * ioctls for VM fds
  */
@@ -427,6 +428,8 @@ struct kvm_trace_rec {
 					struct kvm_assigned_pci_dev)
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
 			struct kvm_assigned_irq)
+#define KVM_REQUEST_GSI_MSG _IOWR(KVMIO, 0x71, struct kvm_assigned_gsi_msg)
+#define KVM_FREE_GSI_MSG _IOR(KVMIO, 0x72, __u32)
 /*
  * ioctls for vcpu fds
  */
@@ -547,4 +550,13 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION	(1 << 0)
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI	(1 << 1)
 
+struct kvm_assigned_gsi_msg {
+	__u32 gsi;
+	struct {
+		__u32 addr_lo;
+		__u32 addr_hi;
+		__u32 data;
+	} msg;
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d63e9a4..0e5741a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -132,6 +132,10 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	struct hlist_head gsi_msg_list;
+	struct mutex gsi_msg_lock;
+#define KVM_NR_GSI_MSG 256
+	DECLARE_BITMAP(gsi_msg_bitmap, KVM_NR_GSI_MSG);
 };
 
 /* The guest did something we don't support. */
@@ -319,6 +323,14 @@ struct kvm_assigned_dev_kernel {
 	struct pci_dev *dev;
 	struct kvm *kvm;
 };
+
+#define KVM_GSI_MSG_MASK 0x1000000ull
+struct kvm_gsi_msg {
+	u32 gsi;
+	struct msi_msg msg;
+	struct hlist_node link;
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -326,6 +338,10 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+int kvm_update_gsi_msg(struct kvm *kvm, struct kvm_gsi_msg *gsi_msg);
+struct kvm_gsi_msg *kvm_find_gsi_msg(struct kvm *kvm, u32 gsi);
+void kvm_free_gsi_msg(struct kvm *kvm, struct kvm_gsi_msg *gsi_msg);
+void kvm_free_gsi_msg_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index aa5d1e5..e95ec3f 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -99,3 +99,73 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
 }
+
+int kvm_update_gsi_msg(struct kvm *kvm, struct kvm_gsi_msg *gsi_msg)
+{
+	struct kvm_gsi_msg *found_msg, *new_gsi_msg;
+	int r, gsi;
+
+	mutex_lock(&kvm->gsi_msg_lock);
+	/* Find whether we need an update or a new entry */
+	found_msg = kvm_find_gsi_msg(kvm, gsi_msg->gsi);
+	if (found_msg)
+		*found_msg = *gsi_msg;
+	else {
+		gsi = find_first_zero_bit(kvm->gsi_msg_bitmap, KVM_NR_GSI_MSG);
+		if (gsi >= KVM_NR_GSI_MSG) {
+			r = -EFAULT;
+			goto out;
+		}
+		__set_bit(gsi, kvm->gsi_msg_bitmap);
+		gsi_msg->gsi = gsi | KVM_GSI_MSG_MASK;
+		new_gsi_msg = kzalloc(sizeof(*new_gsi_msg), GFP_KERNEL);
+		if (!new_gsi_msg) {
+			r = -ENOMEM;
+			goto out;
+		}
+		*new_gsi_msg = *gsi_msg;
+		hlist_add_head(&new_gsi_msg->link, &kvm->gsi_msg_list);
+	}
+	r = 0;
+out:
+	mutex_unlock(&kvm->gsi_msg_lock);
+	return r;
+}
+
+/* Call with kvm->gsi_msg_lock held */
+struct kvm_gsi_msg *kvm_find_gsi_msg(struct kvm *kvm, u32 gsi)
+{
+	struct kvm_gsi_msg *gsi_msg;
+	struct hlist_node *n;
+
+	if (!(gsi & KVM_GSI_MSG_MASK))
+		return NULL;
+	hlist_for_each_entry(gsi_msg, n, &kvm->gsi_msg_list, link)
+		if (gsi_msg->gsi == gsi)
+			goto out;
+	gsi_msg = NULL;
+out:
+	return gsi_msg;
+}
+
+/* Call with kvm->gsi_msg_lock held */
+void kvm_free_gsi_msg(struct kvm *kvm, struct kvm_gsi_msg *gsi_msg)
+{
+	if (!gsi_msg)
+		return;
+	__clear_bit(gsi_msg->gsi & ~KVM_GSI_MSG_MASK, kvm->gsi_msg_bitmap);
+	hlist_del(&gsi_msg->link);
+	kfree(gsi_msg);
+}
+
+void kvm_free_gsi_msg_list(struct kvm *kvm)
+{
+	struct kvm_gsi_msg *gsi_msg;
+	struct hlist_node *pos, *n;
+
+	mutex_lock(&kvm->gsi_msg_lock);
+	hlist_for_each_entry_safe(gsi_msg, pos, n, &kvm->gsi_msg_list, link)
+		kvm_free_gsi_msg(kvm, gsi_msg);
+	mutex_unlock(&kvm->gsi_msg_lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 10cf7e1..db9de47 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -814,6 +814,8 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	kvm_coalesced_mmio_init(kvm);
 #endif
+	INIT_HLIST_HEAD(&kvm->gsi_msg_list);
+	mutex_init(&kvm->gsi_msg_lock);
 out:
 	return kvm;
 }
@@ -851,6 +853,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 {
 	struct mm_struct *mm = kvm->mm;
 
+	kvm_free_gsi_msg_list(kvm);
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
@@ -1579,6 +1582,42 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
+static int kvm_vm_ioctl_request_gsi_msg(struct kvm *kvm,
+				struct kvm_assigned_gsi_msg *agsi_msg)
+{
+	struct kvm_gsi_msg gsi_msg;
+	int r;
+
+	gsi_msg.gsi = agsi_msg->gsi;
+	gsi_msg.msg.address_lo = agsi_msg->msg.addr_lo;
+	gsi_msg.msg.address_hi = agsi_msg->msg.addr_hi;
+	gsi_msg.msg.data = agsi_msg->msg.data;
+
+	r = kvm_update_gsi_msg(kvm, &gsi_msg);
+	if (r == 0)
+		agsi_msg->gsi = gsi_msg.gsi;
+	return r;
+}
+
+static int kvm_vm_ioctl_free_gsi_msg(struct kvm *kvm, u32 gsi)
+{
+	struct kvm_gsi_msg *gsi_msg;
+	int r;
+
+	mutex_lock(&kvm->gsi_msg_lock);
+	gsi_msg = kvm_find_gsi_msg(kvm, gsi);
+	if (!gsi_msg) {
+		printk(KERN_WARNING "kvm: nonexistent gsi->msi_msg mapping!");
+		r = -EINVAL;
+		goto out;
+	}
+	kvm_free_gsi_msg(kvm, gsi_msg);
+	r = 0;
+out:
+	mutex_unlock(&kvm->gsi_msg_lock);
+	return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1861,6 +1900,30 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+	case KVM_REQUEST_GSI_MSG: {
+		struct kvm_assigned_gsi_msg agsi_msg;
+		r = -EFAULT;
+		if (copy_from_user(&agsi_msg, argp, sizeof agsi_msg))
+			goto out;
+		r = kvm_vm_ioctl_request_gsi_msg(kvm, &agsi_msg);
+		if (r)
+			goto out;
+		r = -EFAULT;
+		if (copy_to_user(argp, &agsi_msg, sizeof agsi_msg))
+			goto out;
+		r = 0;
+		break;
+	}
+	case KVM_FREE_GSI_MSG: {
+		unsigned long guest_gsi;
+		r = -EFAULT;
+		if (copy_from_user(&guest_gsi, argp, sizeof guest_gsi))
+			goto out;
+		r = kvm_vm_ioctl_free_gsi_msg(kvm, guest_gsi);
+		if (r)
+			goto out;
+		break;
+	}
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
-- 
1.5.4.5
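
For reference, userspace would drive the two new ioctls roughly as in the sketch below. This is only an illustration, not part of the patch: vm_fd is assumed to be an already created VM fd, setup_msi_gsi() is a made-up helper name, and the MSI address/data values are placeholders that would normally come from the assigned device's MSI capability.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* with this patch applied */

static int setup_msi_gsi(int vm_fd)
{
	/* the ioctl handler copy_from_user()s an unsigned long */
	unsigned long gsi;
	struct kvm_assigned_gsi_msg agsi_msg = {
		.gsi = 0,			/* filled in by the kernel */
		.msg = {
			.addr_lo = 0xfee00000,	/* placeholder MSI address */
			.addr_hi = 0,
			.data	 = 0x4041,	/* placeholder MSI data */
		},
	};

	/* Ask the kernel for a gsi (with KVM_GSI_MSG_MASK set) bound to this message */
	if (ioctl(vm_fd, KVM_REQUEST_GSI_MSG, &agsi_msg) < 0) {
		perror("KVM_REQUEST_GSI_MSG");
		return -1;
	}
	printf("allocated gsi 0x%x\n", agsi_msg.gsi);

	/* ... use agsi_msg.gsi with the existing kvm_set_irq()-based paths ... */

	/* Drop the gsi->msg mapping again when it is no longer needed */
	gsi = agsi_msg.gsi;
	if (ioctl(vm_fd, KVM_FREE_GSI_MSG, &gsi) < 0) {
		perror("KVM_FREE_GSI_MSG");
		return -1;
	}
	return 0;
}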
