Re: [PATCH v5 15/15] block/nvme: Use an array of EventNotifier

2020-08-21 Thread Stefano Garzarella
On Fri, Aug 21, 2020 at 03:09:13PM +0200, Philippe Mathieu-Daudé wrote:
> On 8/21/20 12:29 PM, Stefano Garzarella wrote:
> > On Thu, Aug 20, 2020 at 06:59:01PM +0200, Philippe Mathieu-Daudé wrote:
> >> In preparation for using multiple IRQs (thus multiple eventfds),
> >> make BDRVNVMeState::irq_notifier an array (for now with a single
> >> element, the admin queue notifier).
> >>
> >> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> >> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> >> ---
> >>  block/nvme.c | 31 +--
> >>  1 file changed, 21 insertions(+), 10 deletions(-)
> >>
> >> diff --git a/block/nvme.c b/block/nvme.c
> >> index a61e86a83eb..fe8a40b7ede 100644
> >> --- a/block/nvme.c
> >> +++ b/block/nvme.c
> >> @@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
> >>  #define INDEX_ADMIN 0
> >>  #define INDEX_IO(n) (1 + n)
> >>  
> >> +/* This driver shares a single MSIX IRQ for the admin and I/O queues */
> >> +enum {
> >> +MSIX_SHARED_IRQ_IDX = 0,
> >> +MSIX_IRQ_COUNT = 1
> >> +};
> >> +
> >>  struct BDRVNVMeState {
> >>  AioContext *aio_context;
> >>  QEMUVFIOState *vfio;
> >> @@ -120,7 +126,7 @@ struct BDRVNVMeState {
> >>  /* How many uint32_t elements does each doorbell entry take. */
> >>  size_t doorbell_scale;
> >>  bool write_cache_supported;
> >> -EventNotifier irq_notifier;
> >> +EventNotifier irq_notifier[MSIX_IRQ_COUNT];
> >>  
> >>  uint64_t nsze; /* Namespace size reported by identify command */
> >>  int nsid;  /* The namespace id to read/write data. */
> >> @@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
> >>  
> >>  static void nvme_handle_event(EventNotifier *n)
> >>  {
> >> -BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
> >> +BDRVNVMeState *s = container_of(n, BDRVNVMeState,
> >> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
> >>  
> >>  trace_nvme_handle_event(s);
> >>  event_notifier_test_and_clear(n);
> >> @@ -683,7 +690,8 @@ out_error:
> >>  static bool nvme_poll_cb(void *opaque)
> >>  {
> >>  EventNotifier *e = opaque;
> >> -BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
> >> +BDRVNVMeState *s = container_of(e, BDRVNVMeState,
> >> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
> >>  
> >>  trace_nvme_poll_cb(s);
> >>  return nvme_poll_queues(s);
> >> @@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
> >>  s->device = g_strdup(device);
> >>  s->nsid = namespace;
> >>  s->aio_context = bdrv_get_aio_context(bs);
> >> -ret = event_notifier_init(&s->irq_notifier, 0);
> >> +ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
> >>  if (ret) {
> >>  error_setg(errp, "Failed to init event notifier");
> >>  return ret;
> >> @@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
> >>  }
> >>  }
> >>  
> >> -ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
> >> +ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
> > 
> > Maybe we can use '&s->irq_notifier[MSIX_SHARED_IRQ_IDX]' to match the other
> > changes.
> 
> This makes the following patch in the next series (using multiple
> queues) simpler, but if you prefer I don't mind using your suggestion
> here, then adding another patch to directly use the array address
> (instead of the address of the 1st element in that array). As you
> wish :)

If it simplifies the next patches, it's fine for me ;-)

Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>

> 
> > 
> >>   VFIO_PCI_MSIX_IRQ_INDEX, errp);
> >>  if (ret) {
> >>  goto out;
> >>  }
> >> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> >> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> >> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> >> false, nvme_handle_event, nvme_poll_cb);
> >>  
> >>  nvme_identify(bs, namespace, &local_err);
> >> @@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
> >>  nvme_free_queue_pair(s->queues[i]);
> >>  }
> >>  g_free(s->queues);
> >> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> >> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> >> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> >> false, NULL, NULL);
> >> -event_notifier_cleanup(&s->irq_notifier);
> >> +event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
> >>  qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
> >>  qemu_vfio_close(s->vfio);
> >>  
> >> @@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
> >>  q->completion_bh = NULL;
> >>  }
> >>  
> >> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> >> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> >> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> >> false, NULL, NULL);
> >>  }
> >>  
> >> @@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
> >>  BDRVNVMeState *s = bs->opaque;
> >>  
> >>  s->aio_context = new_context;
> >> -aio_set_event_notifier(new_context, &s->irq_notifier,
> >> +aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> >> false, nvme_handle_event, nvme_poll_cb);
> >>  
> >>  for (int i = 0; i < s->nr_queues; i++) {
> >> -- 
> >> 2.26.2

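The exchange above hinges on array-to-pointer decay: once irq_notifier is an
array, s->irq_notifier and &s->irq_notifier[MSIX_SHARED_IRQ_IDX] evaluate to
the same address, so either spelling can be handed to qemu_vfio_pci_init_irq().
A minimal standalone sketch, with stub types standing in for QEMU's (not the
driver code itself):

/*
 * Minimal sketch (stub types, not the QEMU driver): with irq_notifier
 * converted to an array, the array expression and the address of its
 * first element are the same pointer value.
 */
#include <assert.h>
#include <stdio.h>

typedef struct EventNotifier { int rfd; } EventNotifier;  /* stand-in */

enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct BDRVNVMeState {
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
} BDRVNVMeState;

int main(void)
{
    BDRVNVMeState state, *s = &state;

    /* Array-to-pointer decay: both spellings name &s->irq_notifier[0]. */
    assert(s->irq_notifier == &s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    printf("same address: %p\n", (void *)s->irq_notifier);
    return 0;
}

Passing the bare array also keeps that call site unchanged once
MSIX_IRQ_COUNT grows, which is presumably the simplification the follow-up
series relies on.
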
Re: [PATCH v5 15/15] block/nvme: Use an array of EventNotifier

2020-08-21 Thread Philippe Mathieu-Daudé
On 8/21/20 12:29 PM, Stefano Garzarella wrote:
> On Thu, Aug 20, 2020 at 06:59:01PM +0200, Philippe Mathieu-Daudé wrote:
>> In preparation for using multiple IRQs (thus multiple eventfds),
>> make BDRVNVMeState::irq_notifier an array (for now with a single
>> element, the admin queue notifier).
>>
>> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>> ---
>>  block/nvme.c | 31 +--
>>  1 file changed, 21 insertions(+), 10 deletions(-)
>>
>> diff --git a/block/nvme.c b/block/nvme.c
>> index a61e86a83eb..fe8a40b7ede 100644
>> --- a/block/nvme.c
>> +++ b/block/nvme.c
>> @@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
>>  #define INDEX_ADMIN 0
>>  #define INDEX_IO(n) (1 + n)
>>  
>> +/* This driver shares a single MSIX IRQ for the admin and I/O queues */
>> +enum {
>> +MSIX_SHARED_IRQ_IDX = 0,
>> +MSIX_IRQ_COUNT = 1
>> +};
>> +
>>  struct BDRVNVMeState {
>>  AioContext *aio_context;
>>  QEMUVFIOState *vfio;
>> @@ -120,7 +126,7 @@ struct BDRVNVMeState {
>>  /* How many uint32_t elements does each doorbell entry take. */
>>  size_t doorbell_scale;
>>  bool write_cache_supported;
>> -EventNotifier irq_notifier;
>> +EventNotifier irq_notifier[MSIX_IRQ_COUNT];
>>  
>>  uint64_t nsze; /* Namespace size reported by identify command */
>>  int nsid;  /* The namespace id to read/write data. */
>> @@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
>>  
>>  static void nvme_handle_event(EventNotifier *n)
>>  {
>> -BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
>> +BDRVNVMeState *s = container_of(n, BDRVNVMeState,
>> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>  
>>  trace_nvme_handle_event(s);
>>  event_notifier_test_and_clear(n);
>> @@ -683,7 +690,8 @@ out_error:
>>  static bool nvme_poll_cb(void *opaque)
>>  {
>>  EventNotifier *e = opaque;
>> -BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
>> +BDRVNVMeState *s = container_of(e, BDRVNVMeState,
>> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>  
>>  trace_nvme_poll_cb(s);
>>  return nvme_poll_queues(s);
>> @@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>  s->device = g_strdup(device);
>>  s->nsid = namespace;
>>  s->aio_context = bdrv_get_aio_context(bs);
>> -ret = event_notifier_init(&s->irq_notifier, 0);
>> +ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
>>  if (ret) {
>>  error_setg(errp, "Failed to init event notifier");
>>  return ret;
>> @@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>  }
>>  }
>>  
>> -ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
>> +ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
> 
> Maybe we can use '&s->irq_notifier[MSIX_SHARED_IRQ_IDX]' to match the other
> changes.

This makes the following patch in the next series (using multiple
queues) simpler, but if you prefer I don't mind using your suggestion
here, then adding another patch to directly use the array address
(instead of the address of the 1st element in that array). As you
wish :)

> 
>>   VFIO_PCI_MSIX_IRQ_INDEX, errp);
>>  if (ret) {
>>  goto out;
>>  }
>> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>> false, nvme_handle_event, nvme_poll_cb);
>>  
>>  nvme_identify(bs, namespace, &local_err);
>> @@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
>>  nvme_free_queue_pair(s->queues[i]);
>>  }
>>  g_free(s->queues);
>> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>> false, NULL, NULL);
>> -event_notifier_cleanup(&s->irq_notifier);
>> +event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>  qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
>>  qemu_vfio_close(s->vfio);
>>  
>> @@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
>>  q->completion_bh = NULL;
>>  }
>>  
>> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>> false, NULL, NULL);
>>  }
>>  
>> @@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
>>  BDRVNVMeState *s = bs->opaque;
>>  
>>  s->aio_context = new_context;
>> -aio_set_event_notifier(new_context, &s->irq_notifier,
>> +aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>> false, nvme_handle_event, nvme_poll_cb);
>>  
>>  for (int i = 0; i < s->nr_queues; i++) {
>> -- 
>> 2.26.2

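The container_of() hunks above work because offsetof() accepts an
array-element designator, so the parent BDRVNVMeState is still recoverable
from a pointer to one notifier inside the array. A minimal standalone sketch
of the pattern, with a simplified macro and stub types (QEMU's own
container_of additionally type-checks the member):

/*
 * Minimal sketch of container_of on an array element (simplified macro
 * and stub types; not QEMU code).
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct EventNotifier { int rfd; } EventNotifier;  /* stand-in */

enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct BDRVNVMeState {
    void *aio_context;  /* gives the member a non-zero offset */
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
} BDRVNVMeState;

static void handle_event(EventNotifier *n)
{
    /* The member designator may include an array subscript, so the
     * parent struct is recovered exactly as in nvme_handle_event(). */
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
    assert(n == &s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
}

int main(void)
{
    BDRVNVMeState state;
    handle_event(&state.irq_notifier[MSIX_SHARED_IRQ_IDX]);
    return 0;
}
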
Re: [PATCH v5 15/15] block/nvme: Use an array of EventNotifier

2020-08-21 Thread Stefano Garzarella
On Thu, Aug 20, 2020 at 06:59:01PM +0200, Philippe Mathieu-Daudé wrote:
> In preparation for using multiple IRQs (thus multiple eventfds),
> make BDRVNVMeState::irq_notifier an array (for now with a single
> element, the admin queue notifier).
> 
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
>  block/nvme.c | 31 +--
>  1 file changed, 21 insertions(+), 10 deletions(-)
> 
> diff --git a/block/nvme.c b/block/nvme.c
> index a61e86a83eb..fe8a40b7ede 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
>  #define INDEX_ADMIN 0
>  #define INDEX_IO(n) (1 + n)
>  
> +/* This driver shares a single MSIX IRQ for the admin and I/O queues */
> +enum {
> +MSIX_SHARED_IRQ_IDX = 0,
> +MSIX_IRQ_COUNT = 1
> +};
> +
>  struct BDRVNVMeState {
>  AioContext *aio_context;
>  QEMUVFIOState *vfio;
> @@ -120,7 +126,7 @@ struct BDRVNVMeState {
>  /* How many uint32_t elements does each doorbell entry take. */
>  size_t doorbell_scale;
>  bool write_cache_supported;
> -EventNotifier irq_notifier;
> +EventNotifier irq_notifier[MSIX_IRQ_COUNT];
>  
>  uint64_t nsze; /* Namespace size reported by identify command */
>  int nsid;  /* The namespace id to read/write data. */
> @@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
>  
>  static void nvme_handle_event(EventNotifier *n)
>  {
> -BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
> +BDRVNVMeState *s = container_of(n, BDRVNVMeState,
> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
>  
>  trace_nvme_handle_event(s);
>  event_notifier_test_and_clear(n);
> @@ -683,7 +690,8 @@ out_error:
>  static bool nvme_poll_cb(void *opaque)
>  {
>  EventNotifier *e = opaque;
> -BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
> +BDRVNVMeState *s = container_of(e, BDRVNVMeState,
> +irq_notifier[MSIX_SHARED_IRQ_IDX]);
>  
>  trace_nvme_poll_cb(s);
>  return nvme_poll_queues(s);
> @@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>  s->device = g_strdup(device);
>  s->nsid = namespace;
>  s->aio_context = bdrv_get_aio_context(bs);
> -ret = event_notifier_init(&s->irq_notifier, 0);
> +ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
>  if (ret) {
>  error_setg(errp, "Failed to init event notifier");
>  return ret;
> @@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>  }
>  }
>  
> -ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
> +ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,

Maybe we can use '&s->irq_notifier[MSIX_SHARED_IRQ_IDX]' to match the other
changes.

>   VFIO_PCI_MSIX_IRQ_INDEX, errp);
>  if (ret) {
>  goto out;
>  }
> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> false, nvme_handle_event, nvme_poll_cb);
>  
>  nvme_identify(bs, namespace, &local_err);
> @@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
>  nvme_free_queue_pair(s->queues[i]);
>  }
>  g_free(s->queues);
> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> false, NULL, NULL);
> -event_notifier_cleanup(&s->irq_notifier);
> +event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
>  qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
>  qemu_vfio_close(s->vfio);
>  
> @@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
>  q->completion_bh = NULL;
>  }
>  
> -aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
> +aio_set_event_notifier(bdrv_get_aio_context(bs),
> +   &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> false, NULL, NULL);
>  }
>  
> @@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
>  BDRVNVMeState *s = bs->opaque;
>  
>  s->aio_context = new_context;
> -aio_set_event_notifier(new_context, &s->irq_notifier,
> +aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
> false, nvme_handle_event, nvme_poll_cb);
>  
>  for (int i = 0; i < s->nr_queues; i++) {
> -- 
> 2.26.2
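For background on the mechanism: on Linux hosts QEMU's EventNotifier wraps an
eventfd(2) file descriptor, which qemu_vfio_pci_init_irq() attaches to the
device's MSI-X vector via VFIO, and event_notifier_test_and_clear() drains
the counter so the notifier reads as idle again. A minimal sketch of just the
signal/drain cycle, assuming the eventfd backing (an illustration of the
mechanism, not QEMU code):

/*
 * Minimal sketch of the eventfd(2) signal/drain cycle that backs an
 * EventNotifier on Linux; an illustration only, not QEMU code.
 */
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    int efd = eventfd(0, EFD_NONBLOCK);
    if (efd < 0) {
        perror("eventfd");
        return 1;
    }

    /* "Interrupt": the kernel (here, ourselves) signals the notifier. */
    eventfd_write(efd, 1);

    eventfd_t value;
    /* Drain the counter, as event_notifier_test_and_clear() does; a
     * second read fails with EAGAIN, i.e. no interrupt is pending. */
    printf("first read:  %d\n", eventfd_read(efd, &value));   /* 0  */
    printf("second read: %d\n", eventfd_read(efd, &value));   /* -1 */

    close(efd);
    return 0;
}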