Re: [Qemu-devel] [PATCH v4 46/47] ivshmem: use kvm irqfd for msi notifications

2015-10-02 Thread Marc-André Lureau
On Wed, Sep 30, 2015 at 1:47 PM, Claudio Fontana wrote:
> Is an } else { here missing?

No, but I added a comment: "it will be delayed until msix is enabled,
in write_config "
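
Roughly, the deferred path is meant to work like the sketch below. This is
only an illustration: ivshmem_write_config as written here is not part of
this patch, and the final hook may end up looking different.

/* Sketch only: pick up the vectors that setup_interrupt() had to skip
 * because MSI-X was not enabled yet at realize time. */
static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
                                 uint32_t val, int len)
{
    IVShmemState *s = IVSHMEM(pdev);
    bool was_enabled = msix_enabled(pdev);
    int vector;

    pci_default_write_config(pdev, address, val, len);

    if (!was_enabled && msix_enabled(pdev)) {
        /* the guest just enabled MSI-X: wire up the deferred vectors */
        for (vector = 0; vector < s->vectors; vector++) {
            setup_interrupt(s, vector);
        }
    }
}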

>
> Should we add IVSHMEM_DPRINTFs to help the developer understand which of 
> these paths have been taken?

ok
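
Something along these lines, with the comment from above included as well
(the message strings are placeholders, not final wording):

/* Sketch: setup_interrupt() with the suggested IVSHMEM_DPRINTFs added,
 * one per path taken. */
static void setup_interrupt(IVShmemState *s, int vector)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
        ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd notifier\n");
        s->eventfd_chr[vector] = create_eventfd_chr_device(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd\n");
        if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
    }
}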

-- 
Marc-André Lureau



Re: [Qemu-devel] [PATCH v4 46/47] ivshmem: use kvm irqfd for msi notifications

2015-10-01 Thread Claudio Fontana
On 24.09.2015 13:37, marcandre.lur...@redhat.com wrote:
> From: Marc-André Lureau 
> 
> Use irqfd to improve context switching when notifying the guest.
> If the host doesn't support kvm irqfd, regular msi notifications are
> still supported.
> 
> Note: the ivshmem implementation doesn't allow switching between MSI and
> IO interrupts, and this patch doesn't change that.
> 
> Signed-off-by: Marc-André Lureau 

Paolo, could you also take a look at this one?
Seems to work, but I am not familiar with the kvm msi irqfd primitives.

One comment below.
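
For what it's worth, my condensed reading of the per-vector irqfd lifecycle
in this patch is sketched below; the helper is not part of the patch, it only
restates the calls the hunks make, so please correct me if I got it wrong.

/* Not in the patch: a condensed restatement of the per-vector irqfd
 * lifecycle it implements (error handling mostly omitted). */
static int ivshmem_irqfd_lifecycle_example(IVShmemState *s, PCIDevice *pdev,
                                           EventNotifier *n, int vector)
{
    /* 1. allocate a KVM GSI route for the vector's current MSI message
     *    (ivshmem_add_kvm_msi_virq) */
    int virq = kvm_irqchip_add_msi_route(kvm_state,
                                         msix_get_message(pdev, vector));
    if (virq < 0) {
        return virq;
    }

    /* 2. attach the peer's eventfd to that route, so KVM injects the MSI
     *    directly when the eventfd is signalled (setup_interrupt) */
    kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, virq);

    /* 3. on unmask, refresh the route in case the guest rewrote the MSI
     *    message, then re-attach the irqfd (ivshmem_vector_unmask) */
    kvm_irqchip_update_msi_route(kvm_state, virq,
                                 msix_get_message(pdev, vector));

    /* 4. on mask, detach the irqfd; events arriving while masked are later
     *    turned into msix_set_pending() by ivshmem_vector_poll */
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, virq);

    return 0;
}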

> ---
>  hw/misc/ivshmem.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 169 insertions(+), 6 deletions(-)
> 
> diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
> index 73644cc..39c0791 100644
> --- a/hw/misc/ivshmem.c
> +++ b/hw/misc/ivshmem.c
> @@ -19,6 +19,7 @@
>  #include "hw/hw.h"
>  #include "hw/i386/pc.h"
>  #include "hw/pci/pci.h"
> +#include "hw/pci/msi.h"
>  #include "hw/pci/msix.h"
>  #include "sysemu/kvm.h"
>  #include "migration/migration.h"
> @@ -68,6 +69,7 @@ typedef struct Peer {
>  
>  typedef struct MSIVector {
>  PCIDevice *pdev;
> +int virq;
>  } MSIVector;
>  
>  typedef struct IVShmemState {
> @@ -293,13 +295,73 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
>  msix_notify(pdev, vector);
>  }
>  
> +static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
> + MSIMessage msg)
> +{
> +IVShmemState *s = IVSHMEM(dev);
> +EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +MSIVector *v = &s->msi_vectors[vector];
> +int ret;
> +
> +IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);
> +
> +ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg);
> +if (ret < 0) {
> +return ret;
> +}
> +
> +return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
> +}
> +
> +static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
> +{
> +IVShmemState *s = IVSHMEM(dev);
> +EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +int ret;
> +
> +IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);
> +
> +ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
> +s->msi_vectors[vector].virq);
> +if (ret != 0) {
> +error_report("remove_irqfd_notifier_gsi failed");
> +}
> +}
> +
> +static void ivshmem_vector_poll(PCIDevice *dev,
> +unsigned int vector_start,
> +unsigned int vector_end)
> +{
> +IVShmemState *s = IVSHMEM(dev);
> +unsigned int vector;
> +
> +IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);
> +
> +vector_end = MIN(vector_end, s->vectors);
> +
> +for (vector = vector_start; vector < vector_end; vector++) {
> +EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];
> +
> +if (!msix_is_masked(dev, vector)) {
> +continue;
> +}
> +
> +if (event_notifier_test_and_clear(notifier)) {
> +msix_set_pending(dev, vector);
> +}
> +}
> +}
> +
> static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
>   int vector)
>  {
>  /* create a event character device based on the passed eventfd */
>  IVShmemState *s = opaque;
> -CharDriverState * chr;
> +PCIDevice *pdev = PCI_DEVICE(s);
>  int eventfd = event_notifier_get_fd(n);
> +CharDriverState *chr;
> +
> +s->msi_vectors[vector].pdev = pdev;
>  
>  chr = qemu_chr_open_eventfd(eventfd);
>  
> @@ -484,6 +546,53 @@ static bool fifo_update_and_get(IVShmemState *s, const uint8_t *buf, int size,
>  return true;
>  }
>  
> +static int ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector)
> +{
> +PCIDevice *pdev = PCI_DEVICE(s);
> +MSIMessage msg = msix_get_message(pdev, vector);
> +int ret;
> +
> +IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
> +
> +if (s->msi_vectors[vector].pdev != NULL) {
> +return 0;
> +}
> +
> +ret = kvm_irqchip_add_msi_route(kvm_state, msg); /*  */
> +if (ret < 0) {
> +error_report("ivshmem: kvm_irqchip_add_msi_route failed");
> +return -1;
> +}
> +
> +s->msi_vectors[vector].virq = ret;
> +s->msi_vectors[vector].pdev = pdev;
> +
> +return 0;
> +}
> +
> +static void setup_interrupt(IVShmemState *s, int vector)
> +{
> +EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
> +bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
> +ivshmem_has_feature(s, IVSHMEM_MSI);
> +PCIDevice *pdev = PCI_DEVICE(s);
> +
> +IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);
> +
> +if (!with_irqfd) {
> +s->eventfd_chr[vector] = create_eventfd_chr_device(s, n, vector);
> +

[Qemu-devel] [PATCH v4 46/47] ivshmem: use kvm irqfd for msi notifications

2015-09-24 Thread marcandre.lureau
From: Marc-André Lureau 

Use irqfd to improve context switching when notifying the guest.
If the host doesn't support kvm irqfd, regular msi notifications are
still supported.

Note: the ivshmem implementation doesn't allow switching between MSI and
IO interrupts, and this patch doesn't change that.

Signed-off-by: Marc-André Lureau 
---
 hw/misc/ivshmem.c | 175 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 169 insertions(+), 6 deletions(-)

diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 73644cc..39c0791 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -19,6 +19,7 @@
 #include "hw/hw.h"
 #include "hw/i386/pc.h"
 #include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
 #include "hw/pci/msix.h"
 #include "sysemu/kvm.h"
 #include "migration/migration.h"
@@ -68,6 +69,7 @@ typedef struct Peer {
 
 typedef struct MSIVector {
 PCIDevice *pdev;
+int virq;
 } MSIVector;
 
 typedef struct IVShmemState {
@@ -293,13 +295,73 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
 msix_notify(pdev, vector);
 }
 
+static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
+ MSIMessage msg)
+{
+IVShmemState *s = IVSHMEM(dev);
+EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+MSIVector *v = &s->msi_vectors[vector];
+int ret;
+
+IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);
+
+ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg);
+if (ret < 0) {
+return ret;
+}
+
+return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
+}
+
+static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
+{
+IVShmemState *s = IVSHMEM(dev);
+EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+int ret;
+
+IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);
+
+ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
+s->msi_vectors[vector].virq);
+if (ret != 0) {
+error_report("remove_irqfd_notifier_gsi failed");
+}
+}
+
+static void ivshmem_vector_poll(PCIDevice *dev,
+unsigned int vector_start,
+unsigned int vector_end)
+{
+IVShmemState *s = IVSHMEM(dev);
+unsigned int vector;
+
+IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);
+
+vector_end = MIN(vector_end, s->vectors);
+
+for (vector = vector_start; vector < vector_end; vector++) {
+EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];
+
+if (!msix_is_masked(dev, vector)) {
+continue;
+}
+
+if (event_notifier_test_and_clear(notifier)) {
+msix_set_pending(dev, vector);
+}
+}
+}
+
static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
  int vector)
 {
 /* create a event character device based on the passed eventfd */
 IVShmemState *s = opaque;
-CharDriverState * chr;
+PCIDevice *pdev = PCI_DEVICE(s);
 int eventfd = event_notifier_get_fd(n);
+CharDriverState *chr;
+
+s->msi_vectors[vector].pdev = pdev;
 
 chr = qemu_chr_open_eventfd(eventfd);
 
@@ -484,6 +546,53 @@ static bool fifo_update_and_get(IVShmemState *s, const uint8_t *buf, int size,
 return true;
 }
 
+static int ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector)
+{
+PCIDevice *pdev = PCI_DEVICE(s);
+MSIMessage msg = msix_get_message(pdev, vector);
+int ret;
+
+IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
+
+if (s->msi_vectors[vector].pdev != NULL) {
+return 0;
+}
+
+ret = kvm_irqchip_add_msi_route(kvm_state, msg); /*  */
+if (ret < 0) {
+error_report("ivshmem: kvm_irqchip_add_msi_route failed");
+return -1;
+}
+
+s->msi_vectors[vector].virq = ret;
+s->msi_vectors[vector].pdev = pdev;
+
+return 0;
+}
+
+static void setup_interrupt(IVShmemState *s, int vector)
+{
+EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
+ivshmem_has_feature(s, IVSHMEM_MSI);
+PCIDevice *pdev = PCI_DEVICE(s);
+
+IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);
+
+if (!with_irqfd) {
+s->eventfd_chr[vector] = create_eventfd_chr_device(s, n, vector);
+} else if (msix_enabled(pdev)) {
+if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
+return;
+}
+
+if (!msix_is_masked(pdev, vector)) {
+kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
+   s->msi_vectors[vector].virq);
+}
+}
+}
+
 static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
 {
 IVShmemState *s = opaque;
@@ -587,11 +696,10 @@ static void ivshmem_read(void *opaque,