Re: [PATCH v2 1/2] vdpa: rename vhost_vdpa_net_cvq_add()

2023-05-17 Thread Hawkins Jiawei
Sorry for forgetting the Cc when replying to the email.
I will resend this email with the Cc added.

On Wed, 17 May 2023 at 12:12, Jason Wang  wrote:
>
> On Sat, May 6, 2023 at 10:07 PM Hawkins Jiawei  wrote:
> >
> > We want to introduce a new version of vhost_vdpa_net_cvq_add() that
> > does not poll immediately after forwarding custom buffers
> > to the device, so that QEMU can send all the SVQ control commands
> > in parallel instead of serially.
> >
> > Signed-off-by: Hawkins Jiawei 
> > ---
> >  net/vhost-vdpa.c | 15 +++
> >  1 file changed, 11 insertions(+), 4 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 99904a0da7..10804c7200 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
> >  vhost_vdpa_net_client_stop(nc);
> >  }
> >
> > -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> > -  size_t in_len)
> > +/**
> > + * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
> > + * kicks the device and polls the device used buffers.
> > + *
> > + * Return the length written by the device.
> > + */
> > +static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,
>
> Nit: is it better to use "poll" or "sync" rather than "wait"?
>
> Other than this:
>
> Acked-by: Jason Wang 

Hi Jason,

Thanks for your suggestion. I prefer 'poll', since it makes clear that
this function polls the device immediately, in contrast to the new
version of vhost_vdpa_net_cvq_add().
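
For reference, a rough sketch of what the non-polling variant could look
like on top of this rename. This is only a sketch that assumes
vhost_svq_add()/vhost_svq_poll() stay the underlying primitives, as in
the current vhost_vdpa_net_cvq_add_and_wait(); it is not the actual
follow-up patch:

/*
 * Hypothetical sketch: enqueue one CVQ command on the shadow virtqueue
 * without polling for the used buffer. Returns 0 on success or a
 * negative errno from vhost_svq_add().
 */
static int vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                  size_t out_len, size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = in_len,
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs,
                                                  0);

    /* Expose the buffers to the device, but let the caller poll later */
    return vhost_svq_add(svq, &out, 1, &in, 1, NULL);
}

With such a helper, a caller like vhost_vdpa_net_load() could queue all
of its control commands first and only afterwards call vhost_svq_poll()
once per in-flight command, instead of waiting after every command as
vhost_vdpa_net_cvq_add_and_wait() does.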

I will refactor this in the next version of the patch and carry the
Acked-by tag.

Thanks!

>
> Thanks
>
> > +size_t out_len, size_t in_len)
> >  {
> >  /* Buffers for the device */
> >  const struct iovec out = {
> > @@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
> > memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
> >  memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
> >
> > -return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
> > +return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
> >sizeof(virtio_net_ctrl_ack));
> >  }
> >
> > @@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> >  dev_written = sizeof(status);
> >  *s->status = VIRTIO_NET_OK;
> >  } else {
> > -dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
> > +dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
> > +  sizeof(status));
> >  if (unlikely(dev_written < 0)) {
> >  goto out;
> >  }
> > --
> > 2.25.1
> >
>



Re: [PATCH v2 1/2] vdpa: rename vhost_vdpa_net_cvq_add()

2023-05-16 Thread Jason Wang
On Sat, May 6, 2023 at 10:07 PM Hawkins Jiawei  wrote:
>
> We want to introduce a new version of vhost_vdpa_net_cvq_add() that
> does not poll immediately after forwarding custom buffers
> to the device, so that QEMU can send all the SVQ control commands
> in parallel instead of serially.
>
> Signed-off-by: Hawkins Jiawei 
> ---
>  net/vhost-vdpa.c | 15 +++
>  1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 99904a0da7..10804c7200 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>  vhost_vdpa_net_client_stop(nc);
>  }
>
> -static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> -  size_t in_len)
> +/**
> + * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
> + * kicks the device and polls the device used buffers.
> + *
> + * Return the length written by the device.
> + */
> +static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,

Nit: is it better to use "poll" or "sync" rather than "wait"?

Other than this:

Acked-by: Jason Wang 

Thanks

> +size_t out_len, size_t in_len)
>  {
>  /* Buffers for the device */
>  const struct iovec out = {
> @@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
> memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
>  memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
>
> -return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
> +return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
>sizeof(virtio_net_ctrl_ack));
>  }
>
> @@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
>  dev_written = sizeof(status);
>  *s->status = VIRTIO_NET_OK;
>  } else {
> -dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
> +dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
> +  sizeof(status));
>  if (unlikely(dev_written < 0)) {
>  goto out;
>  }
> --
> 2.25.1
>




[PATCH v2 1/2] vdpa: rename vhost_vdpa_net_cvq_add()

2023-05-06 Thread Hawkins Jiawei
We want to introduce a new version of vhost_vdpa_net_cvq_add() that
does not poll immediately after forwarding custom buffers
to the device, so that QEMU can send all the SVQ control commands
in parallel instead of serially.

Signed-off-by: Hawkins Jiawei 
---
 net/vhost-vdpa.c | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 99904a0da7..10804c7200 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -590,8 +590,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
 vhost_vdpa_net_client_stop(nc);
 }
 
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
-  size_t in_len)
+/**
+ * vhost_vdpa_net_cvq_add_and_wait() adds SVQ control commands to SVQ,
+ * kicks the device and polls the device used buffers.
+ *
+ * Return the length written by the device.
+ */
+static ssize_t vhost_vdpa_net_cvq_add_and_wait(VhostVDPAState *s,
+size_t out_len, size_t in_len)
 {
 /* Buffers for the device */
 const struct iovec out = {
@@ -636,7 +642,7 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
 memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);
 
-return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
+return vhost_vdpa_net_cvq_add_and_wait(s, sizeof(ctrl) + data_size,
   sizeof(virtio_net_ctrl_ack));
 }
 
@@ -753,7 +759,8 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
 dev_written = sizeof(status);
 *s->status = VIRTIO_NET_OK;
 } else {
-dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+dev_written = vhost_vdpa_net_cvq_add_and_wait(s, out.iov_len,
+  sizeof(status));
 if (unlikely(dev_written < 0)) {
 goto out;
 }
-- 
2.25.1