Re: [PATCH 10/16] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error

2020-10-22 Thread Stefan Hajnoczi
On Tue, Oct 20, 2020 at 07:24:22PM +0200, Philippe Mathieu-Daudé wrote:
> Currently qemu_vfio_dma_map() displays errors on stderr.
> When using the management interface, this information is simply
> lost. Pass qemu_vfio_dma_map() an Error* argument so it can
> propagate the error to callers.
> 
> Reviewed-by: Fam Zheng 
> Signed-off-by: Philippe Mathieu-Daudé 
> ---
>  include/qemu/vfio-helpers.h |  2 +-
>  block/nvme.c                | 14 +++++++-------
>  util/vfio-helpers.c         | 12 +++++++-----
>  3 files changed, 15 insertions(+), 13 deletions(-)

Reviewed-by: Stefan Hajnoczi 



[PATCH 10/16] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error

2020-10-20 Thread Philippe Mathieu-Daudé
Currently qemu_vfio_dma_map() displays errors on stderr.
When using the management interface, this information is simply
lost. Pass qemu_vfio_dma_map() an Error* argument so it can
propagate the error to callers.

Reviewed-by: Fam Zheng 
Signed-off-by: Philippe Mathieu-Daudé 
---
 include/qemu/vfio-helpers.h |  2 +-
 block/nvme.c                | 14 +++++++-------
 util/vfio-helpers.c         | 12 +++++++-----
 3 files changed, 15 insertions(+), 13 deletions(-)
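
For readers of the archive, here is a minimal, hypothetical caller-side sketch of the new contract (example_map_buffer is illustrative only and is not part of this patch). With the extra Error ** argument, qemu_vfio_dma_map() hands a filled Error object back to its caller on failure instead of printing to stderr, so the caller decides whether to add context, propagate, or report it:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/vfio-helpers.h"

/* Hypothetical caller, for illustration only: a failed mapping now
 * returns an Error object to the caller rather than writing to stderr. */
static int example_map_buffer(QEMUVFIOState *s, void *buf, size_t len,
                              uint64_t *iova, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = qemu_vfio_dma_map(s, buf, len, false, iova, &local_err);
    if (ret) {
        /* Add caller context, then hand the error up the stack. */
        error_propagate_prepend(errp, local_err, "cannot map buffer: ");
    }
    return ret;
}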

diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 5cb346d8e67..4b97a904e93 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -19,7 +19,7 @@ QEMUVFIOState *qemu_vfio_open_pci(const char *device, size_t *min_page_size,
                                   Error **errp);
 void qemu_vfio_close(QEMUVFIOState *s);
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova_list);
+                      bool temporary, uint64_t *iova_list, Error **errp);
 int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
 void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
 void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
diff --git a/block/nvme.c b/block/nvme.c
index 8335f5d70dd..428cda620df 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -167,9 +167,9 @@ static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
         return;
     }
     memset(q->queue, 0, bytes);
-    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map queue");
+        error_prepend(errp, "Cannot map queue: ");
     }
 }
 
@@ -223,7 +223,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                           s->page_size * NVME_NUM_REQS,
-                          false, &prp_list_iova);
+                          false, &prp_list_iova, errp);
     if (r) {
         goto fail;
     }
@@ -514,9 +514,9 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
         error_setg(errp, "Cannot allocate buffer for identify response");
         goto out;
     }
-    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
+    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map buffer for DMA");
+        error_prepend(errp, "Cannot map buffer for DMA: ");
         goto out;
     }
 
@@ -990,7 +990,7 @@ try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
                               qiov->iov[i].iov_len,
-                              true, &iova);
+                              true, &iova, NULL);
         if (r == -ENOMEM && retry) {
             retry = false;
             trace_nvme_dma_flush_queue_wait(s);
@@ -1437,7 +1437,7 @@ static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
     int ret;
     BDRVNVMeState *s = bs->opaque;
 
-    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, NULL);
     if (ret) {
         /* FIXME: we may run out of IOVA addresses after repeated
          * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 6a5100f4892..8c075d9aae7 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -486,7 +486,7 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
     trace_qemu_vfio_ram_block_added(s, host, size);
-    qemu_vfio_dma_map(s, host, size, false, NULL);
+    qemu_vfio_dma_map(s, host, size, false, NULL, NULL);
 }
 
 static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
@@ -501,6 +501,7 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
 
 static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
 {
+    Error *local_err = NULL;
     void *host_addr = qemu_ram_get_host_addr(rb);
     ram_addr_t length = qemu_ram_get_used_length(rb);
     int ret;
@@ -509,10 +510,11 @@ static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
     if (!host_addr) {
         return 0;
     }
-    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
+    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL, &local_err);
     if (ret) {
-        fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
-                host_addr, (uint64_t)length);
+        error_reportf_err(local_err,
+                          "qemu_vfio_init_ramblock: failed %p %" PRId64 ":",
+                          host_addr, (uint64_t)length);
     }
     return 0;
 }
@@ -754,7 +756,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
  * mapping status