It seems that to avoid deadlocks it is enough to poll the vq before
we are about to use the last buffer.  This should be faster than
commit c70aa540c7a9f67add11ad3161096fb95233aa2e.

Signed-off-by: Michael S. Tsirkin <m...@redhat.com>
---
 drivers/vhost/net.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8e9de79..3967f82 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -197,8 +197,16 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, int status)
 {
        struct vhost_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
-
-       vhost_poll_queue(&vq->poll);
+       int cnt = atomic_read(&ubufs->kref.refcount);
+
+       /*
+        * Trigger polling thread if guest stopped submitting new buffers:
+        * in this case, the refcount after decrement will eventually reach 1
+        * so here it is 2.
+        * We also trigger polling periodically after each 16 packets.
+        */
+       if (cnt <= 2 || !(cnt % 16))
+               vhost_poll_queue(&vq->poll);
        /* set len to mark this desc buffers done DMA */
        vq->heads[ubuf->desc].len = status ?
                VHOST_DMA_FAILED_LEN : VHOST_DMA_DONE_LEN;
-- 
MST
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to