Re: [PATCH 2/3] softmmu: Remove DMA unmap notification callback

2023-08-23 Thread Mattias Nissler
On Thu, Jul 20, 2023 at 8:14 PM Stefan Hajnoczi  wrote:
>
> On Tue, Jul 04, 2023 at 01:06:26AM -0700, Mattias Nissler wrote:
> > According to old commit messages, this was introduced to retry a DMA
> > operation at a later point in case the single bounce buffer is found to
> > be busy. This was never widely used - only the dma-helpers code made use
> > of it, while other device models that use multiple DMA mappings
> > (concurrently) simply failed.
> >
> > After the improvement to support multiple concurrent bounce buffers,
> > the condition that the notification callback allowed callers to work
> > around no longer exists, so we can just remove the logic and simplify
> > the code.
> >
> > Signed-off-by: Mattias Nissler 
> > ---
> >  softmmu/dma-helpers.c | 28 -
> >  softmmu/physmem.c | 71 ---
> >  2 files changed, 99 deletions(-)
>
> I'm not sure if it will be possible to remove this once a limit is
> placed on bounce buffer space.

I investigated this in detail and concluded that, unfortunately, you're
right. In particular, after finding that Linux likes to use
megabyte-sized DMA buffers with xhci-attached USB storage, I don't
think it's realistic to set a reasonable fixed limit that accommodates
most workloads in practice.
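
To make the sizing problem concrete, here is a minimal standalone C
sketch of what a fixed cap would have to look like (illustrative only,
not actual QEMU API; the counter, limit, and function names are made
up for the sketch):

#include <stdatomic.h>
#include <stdlib.h>

static atomic_size_t bounce_bytes_in_use;
#define BOUNCE_LIMIT (4u << 20)   /* assumed cap: 4 MiB */

/* Try to reserve cap space for one bounce buffer; NULL means the cap
 * is exhausted and the caller has to fail or defer the mapping. */
static void *bounce_try_alloc(size_t len)
{
    size_t old = atomic_fetch_add(&bounce_bytes_in_use, len);
    if (old + len > BOUNCE_LIMIT) {
        atomic_fetch_sub(&bounce_bytes_in_use, len);
        return NULL;
    }
    return malloc(len);
}

With guests issuing multi-megabyte transfers, any such constant is
either too small for real workloads or so large that it defeats its
purpose - and every caller that gets NULL back needs some way to learn
when space frees up, which is exactly what the notification callback
provided.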

>
> >
> > diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
> > index 2463964805..d05d226f11 100644
> > --- a/softmmu/dma-helpers.c
> > +++ b/softmmu/dma-helpers.c
> > @@ -68,23 +68,10 @@ typedef struct {
> >  int sg_cur_index;
> >  dma_addr_t sg_cur_byte;
> >  QEMUIOVector iov;
> > -QEMUBH *bh;
> >  DMAIOFunc *io_func;
> >  void *io_func_opaque;
> >  } DMAAIOCB;
> >
> > -static void dma_blk_cb(void *opaque, int ret);
> > -
> > -static void reschedule_dma(void *opaque)
> > -{
> > -DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> > -
> > -assert(!dbs->acb && dbs->bh);
> > -qemu_bh_delete(dbs->bh);
> > -dbs->bh = NULL;
> > -dma_blk_cb(dbs, 0);
> > -}
> > -
> >  static void dma_blk_unmap(DMAAIOCB *dbs)
> >  {
> >  int i;
> > @@ -101,7 +88,6 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
> >  {
> >  trace_dma_complete(dbs, ret, dbs->common.cb);
> >
> > -assert(!dbs->acb && !dbs->bh);
> >  dma_blk_unmap(dbs);
> >  if (dbs->common.cb) {
> >  dbs->common.cb(dbs->common.opaque, ret);
> > @@ -164,13 +150,6 @@ static void dma_blk_cb(void *opaque, int ret)
> >  }
> >  }
> >
> > -if (dbs->iov.size == 0) {
> > -trace_dma_map_wait(dbs);
> > -dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
> > -cpu_register_map_client(dbs->bh);
> > -goto out;
> > -}
> > -
> >  if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
> > qemu_iovec_discard_back(&dbs->iov,
> > QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
> > @@ -189,18 +168,12 @@ static void dma_aio_cancel(BlockAIOCB *acb)
> >
> >  trace_dma_aio_cancel(dbs);
> >
> > -assert(!(dbs->acb && dbs->bh));
> >  if (dbs->acb) {
> >  /* This will invoke dma_blk_cb.  */
> >  blk_aio_cancel_async(dbs->acb);
> >  return;
> >  }
> >
> > -if (dbs->bh) {
> > -cpu_unregister_map_client(dbs->bh);
> > -qemu_bh_delete(dbs->bh);
> > -dbs->bh = NULL;
> > -}
> >  if (dbs->common.cb) {
> >  dbs->common.cb(dbs->common.opaque, -ECANCELED);
> >  }
> > @@ -239,7 +212,6 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
> >  dbs->dir = dir;
> >  dbs->io_func = io_func;
> >  dbs->io_func_opaque = io_func_opaque;
> > -dbs->bh = NULL;
> > qemu_iovec_init(&dbs->iov, sg->nsg);
> >  dma_blk_cb(dbs, 0);
> > return &dbs->common;
> > diff --git a/softmmu/physmem.c b/softmmu/physmem.c
> > index 56130b5a1d..2b4123c127 100644
> > --- a/softmmu/physmem.c
> > +++ b/softmmu/physmem.c
> > @@ -2908,49 +2908,6 @@ typedef struct {
> >  uint8_t buffer[];
> >  } BounceBuffer;
> >
> > -static size_t bounce_buffers_in_use;
> > -
> > -typedef struct MapClient {
> > -QEMUBH *bh;
> > -QLIST_ENTRY(MapClient) link;
> > -} MapClient;
> > -
> > -QemuMutex map_client_list_lock;
> > -static QLIST_HEAD(, MapClient) map_client_list
> > -= QLIST_HEAD_INITIALIZER(map_client_list);
> > -
> > -static void cpu_unregister_map_client_do(MapClient *client)
> > -{
> > -QLIST_REMOVE(client, link);
> > -g_free(client);
> > -}
> > -
> > -static void cpu_notify_map_clients_locked(void)
> > -{
> > -MapClient *client;
> > -
> > -while (!QLIST_EMPTY(&map_client_list)) {
> > -client = QLIST_FIRST(&map_client_list);
> > -qemu_bh_schedule(client->bh);
> > -cpu_unregister_map_client_do(client);
> > -}
> > -}
> > -
> > -void cpu_register_map_client(QEMUBH *bh)
> > -{
> > -MapClient *client = g_malloc(sizeof(*client));
> > -
> > -qemu_mutex_lock(&map_client_list_lock);
> > -client->bh = bh;
> > -QLIST_INSERT_HEAD(&map_client_list, client, link);
> > -/* Write map_client_list before reading in_use.  */

Re: [PATCH 2/3] softmmu: Remove DMA unmap notification callback

2023-07-20 Thread Stefan Hajnoczi
On Tue, Jul 04, 2023 at 01:06:26AM -0700, Mattias Nissler wrote:
> According to old commit messages, this was introduced to retry a DMA
> operation at a later point in case the single bounce buffer is found to
> be busy. This was never widely used - only the dma-helpers code made use
> of it, while other device models that use multiple DMA mappings
> (concurrently) simply failed.
> 
> After the improvement to support multiple concurrent bounce buffers,
> the condition that the notification callback allowed callers to work
> around no longer exists, so we can just remove the logic and simplify
> the code.
> 
> Signed-off-by: Mattias Nissler 
> ---
>  softmmu/dma-helpers.c | 28 -
>  softmmu/physmem.c | 71 ---
>  2 files changed, 99 deletions(-)

I'm not sure if it will be possible to remove this once a limit is
placed on bounce buffer space.
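
(To illustrate what a capped implementation would need to keep, here is
a toy model of the notification protocol being removed - self-contained
C, with QEMU's bottom-half machinery replaced by a plain callback list;
all names here are made up for the sketch:)

#include <stdlib.h>

typedef void (*retry_fn)(void *opaque);

typedef struct MapWaiter {
    retry_fn cb;
    void *opaque;
    struct MapWaiter *next;
} MapWaiter;

static MapWaiter *waiters;

/* A caller whose mapping failed registers to be retried later. */
static void map_client_register(retry_fn cb, void *opaque)
{
    MapWaiter *w = malloc(sizeof(*w));
    w->cb = cb;
    w->opaque = opaque;
    w->next = waiters;
    waiters = w;
}

/* Called on unmap: wake all waiters so they can retry their mapping. */
static void map_clients_notify(void)
{
    while (waiters) {
        MapWaiter *w = waiters;
        waiters = w->next;
        w->cb(w->opaque);
        free(w);
    }
}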

> 
> diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
> index 2463964805..d05d226f11 100644
> --- a/softmmu/dma-helpers.c
> +++ b/softmmu/dma-helpers.c
> @@ -68,23 +68,10 @@ typedef struct {
>  int sg_cur_index;
>  dma_addr_t sg_cur_byte;
>  QEMUIOVector iov;
> -QEMUBH *bh;
>  DMAIOFunc *io_func;
>  void *io_func_opaque;
>  } DMAAIOCB;
>  
> -static void dma_blk_cb(void *opaque, int ret);
> -
> -static void reschedule_dma(void *opaque)
> -{
> -DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> -
> -assert(!dbs->acb && dbs->bh);
> -qemu_bh_delete(dbs->bh);
> -dbs->bh = NULL;
> -dma_blk_cb(dbs, 0);
> -}
> -
>  static void dma_blk_unmap(DMAAIOCB *dbs)
>  {
>  int i;
> @@ -101,7 +88,6 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
>  {
>  trace_dma_complete(dbs, ret, dbs->common.cb);
>  
> -assert(!dbs->acb && !dbs->bh);
>  dma_blk_unmap(dbs);
>  if (dbs->common.cb) {
>  dbs->common.cb(dbs->common.opaque, ret);
> @@ -164,13 +150,6 @@ static void dma_blk_cb(void *opaque, int ret)
>  }
>  }
>  
> -if (dbs->iov.size == 0) {
> -trace_dma_map_wait(dbs);
> -dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
> -cpu_register_map_client(dbs->bh);
> -goto out;
> -}
> -
>  if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
>  qemu_iovec_discard_back(&dbs->iov,
>  QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
> @@ -189,18 +168,12 @@ static void dma_aio_cancel(BlockAIOCB *acb)
>  
>  trace_dma_aio_cancel(dbs);
>  
> -assert(!(dbs->acb && dbs->bh));
>  if (dbs->acb) {
>  /* This will invoke dma_blk_cb.  */
>  blk_aio_cancel_async(dbs->acb);
>  return;
>  }
>  
> -if (dbs->bh) {
> -cpu_unregister_map_client(dbs->bh);
> -qemu_bh_delete(dbs->bh);
> -dbs->bh = NULL;
> -}
>  if (dbs->common.cb) {
>  dbs->common.cb(dbs->common.opaque, -ECANCELED);
>  }
> @@ -239,7 +212,6 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
>  dbs->dir = dir;
>  dbs->io_func = io_func;
>  dbs->io_func_opaque = io_func_opaque;
> -dbs->bh = NULL;
>  qemu_iovec_init(&dbs->iov, sg->nsg);
>  dma_blk_cb(dbs, 0);
>  return &dbs->common;
> diff --git a/softmmu/physmem.c b/softmmu/physmem.c
> index 56130b5a1d..2b4123c127 100644
> --- a/softmmu/physmem.c
> +++ b/softmmu/physmem.c
> @@ -2908,49 +2908,6 @@ typedef struct {
>  uint8_t buffer[];
>  } BounceBuffer;
>  
> -static size_t bounce_buffers_in_use;
> -
> -typedef struct MapClient {
> -QEMUBH *bh;
> -QLIST_ENTRY(MapClient) link;
> -} MapClient;
> -
> -QemuMutex map_client_list_lock;
> -static QLIST_HEAD(, MapClient) map_client_list
> -= QLIST_HEAD_INITIALIZER(map_client_list);
> -
> -static void cpu_unregister_map_client_do(MapClient *client)
> -{
> -QLIST_REMOVE(client, link);
> -g_free(client);
> -}
> -
> -static void cpu_notify_map_clients_locked(void)
> -{
> -MapClient *client;
> -
> -while (!QLIST_EMPTY(&map_client_list)) {
> -client = QLIST_FIRST(&map_client_list);
> -qemu_bh_schedule(client->bh);
> -cpu_unregister_map_client_do(client);
> -}
> -}
> -
> -void cpu_register_map_client(QEMUBH *bh)
> -{
> -MapClient *client = g_malloc(sizeof(*client));
> -
> -qemu_mutex_lock(&map_client_list_lock);
> -client->bh = bh;
> -QLIST_INSERT_HEAD(&map_client_list, client, link);
> -/* Write map_client_list before reading in_use.  */
> -smp_mb();
> -if (qatomic_read(&bounce_buffers_in_use)) {
> -cpu_notify_map_clients_locked();
> -}
> -qemu_mutex_unlock(&map_client_list_lock);
> -}
> -
>  void cpu_exec_init_all(void)
>  {
>  qemu_mutex_init(&ram_list.mutex);
> @@ -2964,28 +2921,6 @@ void cpu_exec_init_all(void)
>  finalize_target_page_bits();
>  io_mem_init();
>  memory_map_init();
> -qemu_mutex_init(&map_client_list_lock);
> -}
> -
> -void cpu_unregister_map_client(QEMUBH *bh)
> -{
> -MapClient *client;
> -
> -qemu_mutex_lock(&map_client_list_lock);
> -QLIST_FOREACH(client, &map_client_list, link) {

[PATCH 2/3] softmmu: Remove DMA unmap notification callback

2023-07-04 Thread Mattias Nissler
According to old commit messages, this was introduced to retry a DMA
operation at a later point in case the single bounce buffer is found to
be busy. This was never widely used - only the dma-helpers code made use
of it, while other device models that use multiple DMA mappings
(concurrently) simply failed.

After the improvement to support multiple concurrent bounce buffers,
the condition that the notification callback allowed callers to work
around no longer exists, so we can just remove the logic and simplify
the code.

Signed-off-by: Mattias Nissler 
---
 softmmu/dma-helpers.c | 28 -
 softmmu/physmem.c | 71 ---
 2 files changed, 99 deletions(-)
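
For context: the multi-buffer scheme from the previous patch boils down
to allocating a fresh BounceBuffer per mapping instead of contending
for a single static one. A rough standalone sketch, loosely modeled on
the BounceBuffer struct visible in the diff below (the real code is in
patch 1/3):

#include <stdint.h>
#include <stdlib.h>

typedef struct {
    size_t len;
    uint8_t buffer[];   /* flexible array member, as in physmem.c */
} BounceBuffer;

/* Each map request gets its own buffer, so "bounce buffer busy" - the
 * condition the callback removed below existed to wait out - cannot
 * happen anymore. */
static BounceBuffer *bounce_buffer_new(size_t len)
{
    BounceBuffer *b = malloc(sizeof(*b) + len);
    if (b) {
        b->len = len;
    }
    return b;
}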

diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
index 2463964805..d05d226f11 100644
--- a/softmmu/dma-helpers.c
+++ b/softmmu/dma-helpers.c
@@ -68,23 +68,10 @@ typedef struct {
 int sg_cur_index;
 dma_addr_t sg_cur_byte;
 QEMUIOVector iov;
-QEMUBH *bh;
 DMAIOFunc *io_func;
 void *io_func_opaque;
 } DMAAIOCB;
 
-static void dma_blk_cb(void *opaque, int ret);
-
-static void reschedule_dma(void *opaque)
-{
-DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-assert(!dbs->acb && dbs->bh);
-qemu_bh_delete(dbs->bh);
-dbs->bh = NULL;
-dma_blk_cb(dbs, 0);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
 int i;
@@ -101,7 +88,6 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
 {
 trace_dma_complete(dbs, ret, dbs->common.cb);
 
-assert(!dbs->acb && !dbs->bh);
 dma_blk_unmap(dbs);
 if (dbs->common.cb) {
 dbs->common.cb(dbs->common.opaque, ret);
@@ -164,13 +150,6 @@ static void dma_blk_cb(void *opaque, int ret)
 }
 }
 
-if (dbs->iov.size == 0) {
-trace_dma_map_wait(dbs);
-dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
-cpu_register_map_client(dbs->bh);
-goto out;
-}
-
 if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
 qemu_iovec_discard_back(&dbs->iov,
 QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
@@ -189,18 +168,12 @@ static void dma_aio_cancel(BlockAIOCB *acb)
 
 trace_dma_aio_cancel(dbs);
 
-assert(!(dbs->acb && dbs->bh));
 if (dbs->acb) {
 /* This will invoke dma_blk_cb.  */
 blk_aio_cancel_async(dbs->acb);
 return;
 }
 
-if (dbs->bh) {
-cpu_unregister_map_client(dbs->bh);
-qemu_bh_delete(dbs->bh);
-dbs->bh = NULL;
-}
 if (dbs->common.cb) {
 dbs->common.cb(dbs->common.opaque, -ECANCELED);
 }
@@ -239,7 +212,6 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
 dbs->dir = dir;
 dbs->io_func = io_func;
 dbs->io_func_opaque = io_func_opaque;
-dbs->bh = NULL;
 qemu_iovec_init(&dbs->iov, sg->nsg);
 dma_blk_cb(dbs, 0);
 return &dbs->common;
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 56130b5a1d..2b4123c127 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -2908,49 +2908,6 @@ typedef struct {
 uint8_t buffer[];
 } BounceBuffer;
 
-static size_t bounce_buffers_in_use;
-
-typedef struct MapClient {
-QEMUBH *bh;
-QLIST_ENTRY(MapClient) link;
-} MapClient;
-
-QemuMutex map_client_list_lock;
-static QLIST_HEAD(, MapClient) map_client_list
-= QLIST_HEAD_INITIALIZER(map_client_list);
-
-static void cpu_unregister_map_client_do(MapClient *client)
-{
-QLIST_REMOVE(client, link);
-g_free(client);
-}
-
-static void cpu_notify_map_clients_locked(void)
-{
-MapClient *client;
-
-while (!QLIST_EMPTY(&map_client_list)) {
-client = QLIST_FIRST(&map_client_list);
-qemu_bh_schedule(client->bh);
-cpu_unregister_map_client_do(client);
-}
-}
-
-void cpu_register_map_client(QEMUBH *bh)
-{
-MapClient *client = g_malloc(sizeof(*client));
-
-qemu_mutex_lock(&map_client_list_lock);
-client->bh = bh;
-QLIST_INSERT_HEAD(&map_client_list, client, link);
-/* Write map_client_list before reading in_use.  */
-smp_mb();
-if (qatomic_read(&bounce_buffers_in_use)) {
-cpu_notify_map_clients_locked();
-}
-qemu_mutex_unlock(&map_client_list_lock);
-}
-
 void cpu_exec_init_all(void)
 {
 qemu_mutex_init(&ram_list.mutex);
@@ -2964,28 +2921,6 @@ void cpu_exec_init_all(void)
 finalize_target_page_bits();
 io_mem_init();
 memory_map_init();
-qemu_mutex_init(&map_client_list_lock);
-}
-
-void cpu_unregister_map_client(QEMUBH *bh)
-{
-MapClient *client;
-
-qemu_mutex_lock(&map_client_list_lock);
-QLIST_FOREACH(client, &map_client_list, link) {
-if (client->bh == bh) {
-cpu_unregister_map_client_do(client);
-break;
-}
-}
-qemu_mutex_unlock(&map_client_list_lock);
-}
-
-static void cpu_notify_map_clients(void)
-{
-qemu_mutex_lock(&map_client_list_lock);
-cpu_notify_map_clients_locked();
-qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
@@ -3077,8 +3012,6 @@ void