From: Hawkins Jiawei <yin31...@gmail.com>

Next patches in this series will no longer perform an immediate poll
and check of the device's used buffers for each CVQ state load command.
Instead, they will send CVQ state load commands in parallel by polling
multiple pending buffers at once.
To achieve this, this patch refactors vhost_svq_poll() to accept a new
argument `num`, which allows vhost_svq_poll() to wait for the device to
use multiple elements, rather than polling for a single element.

Signed-off-by: Hawkins Jiawei <yin31...@gmail.com>
Acked-by: Eugenio Pérez <epere...@redhat.com>
Message-Id: <950b3bfcfc5d446168b9d6a249d554a013a691d4.1693287885.git.yin31...@gmail.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |  2 +-
 hw/virtio/vhost-shadow-virtqueue.c | 36 ++++++++++++++++++------------
 net/vhost-vdpa.c                   |  2 +-
 3 files changed, 24 insertions(+), 16 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 6efe051a70..5bce67837b 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -119,7 +119,7 @@ void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
 int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
                   size_t out_num, const struct iovec *in_sg, size_t in_num,
                   VirtQueueElement *elem);
-size_t vhost_svq_poll(VhostShadowVirtqueue *svq);
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);
 
 void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
 void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 49e5aed931..e731b1d2ea 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -514,29 +514,37 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
 }
 
 /**
- * Poll the SVQ for one device used buffer.
+ * Poll the SVQ to wait for the device to use the specified number
+ * of elements and return the total length written by the device.
  *
  * This function race with main event loop SVQ polling, so extra
  * synchronization is needed.
  *
- * Return the length written by the device.
+ * @svq: The svq
+ * @num: The number of elements that need to be used
  */
-size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
 {
-    int64_t start_us = g_get_monotonic_time();
-    uint32_t len = 0;
+    size_t len = 0;
+    uint32_t r;
 
-    do {
-        if (vhost_svq_more_used(svq)) {
-            break;
-        }
+    while (num--) {
+        int64_t start_us = g_get_monotonic_time();
 
-        if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
-            return 0;
-        }
-    } while (true);
+        do {
+            if (vhost_svq_more_used(svq)) {
+                break;
+            }
+
+            if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
+                return len;
+            }
+        } while (true);
+
+        vhost_svq_get_buf(svq, &r);
+        len += r;
+    }
 
-    vhost_svq_get_buf(svq, &len);
     return len;
 }
 
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 90beda42e0..5808d1b60c 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -645,7 +645,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
      * descriptor. Also, we need to take the answer before SVQ pulls by itself,
      * when BQL is released
      */
-    return vhost_svq_poll(svq);
+    return vhost_svq_poll(svq, 1);
 }
 
 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
-- 
MST
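
For context, a minimal sketch of how a caller could use the new `num`
argument once several CVQ buffers are pending, along the lines of what
the commit message says later patches will do. This is not the actual
follow-up code: only vhost_svq_add() and vhost_svq_poll() come from the
interfaces touched by this patch; the surrounding function and the
helper prepare_cvq_cmd_sg() are hypothetical, and error handling and
buffer mapping are omitted.

#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

/*
 * Illustrative only: queue several CVQ commands back to back, then
 * wait for the device to use all of them with one vhost_svq_poll()
 * call instead of polling after every vhost_svq_add().
 */
static ssize_t cvq_load_cmds_in_parallel(VhostShadowVirtqueue *svq,
                                         struct iovec *out_sg,
                                         struct iovec *in_sg,
                                         size_t n_cmds)
{
    size_t cmds_in_flight = 0;

    for (size_t i = 0; i < n_cmds; i++) {
        /* Hypothetical helper: fill out_sg[i]/in_sg[i] for command i */
        prepare_cvq_cmd_sg(&out_sg[i], &in_sg[i], i);

        /* Expose the buffers to the device without polling them yet */
        int r = vhost_svq_add(svq, &out_sg[i], 1, &in_sg[i], 1, NULL);
        if (unlikely(r != 0)) {
            break;
        }
        cmds_in_flight++;
    }

    /* Wait once for the device to use every pending buffer */
    return vhost_svq_poll(svq, cmds_in_flight);
}

The point is that the busy-wait cost is paid once per batch of pending
buffers rather than once per command.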