When the virtio queue is full, a work item is scheduled to run
in 1ms and retry adding the request to the queue. That is a long
delay on the time scale at which a virtio-fs device can operate:
with a DPU the baseline latency is around 40us without going to
a remote server (4k, QD=1). This patch queues requests when the
virtio queue is full, and immediately fills the queue back up
with queued requests when a completed request is taken off.
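
The following is a minimal, self-contained userspace sketch of the
refill-on-completion pattern described above. It is not the kernel
code; the names (ring, pending, submit(), complete_one(), RING_SIZE)
are purely illustrative stand-ins for the virtqueue, fsvq->queued_reqs
and the dispatch work:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4	/* stands in for the virtqueue size */

struct ring {
	int used;	/* descriptors currently in flight */
};

struct pending {
	int reqs[64];	/* parked requests (queued_reqs equivalent) */
	int count;
};

/* Mirrors the submit path failing with -ENOSPC when the ring is full. */
static bool ring_add(struct ring *r)
{
	if (r->used == RING_SIZE)
		return false;
	r->used++;
	return true;
}

/*
 * Old behaviour: on failure, a work item retried after 1ms.
 * New behaviour: on failure, park the request and wait for a completion.
 */
static void submit(struct ring *r, struct pending *p, int req)
{
	if (!ring_add(r))
		p->reqs[p->count++] = req;
}

/*
 * Completion path: free a slot and immediately refill from the pending
 * list, the way the done_work handlers now kick dispatch_work with no
 * delay.
 */
static void complete_one(struct ring *r, struct pending *p)
{
	if (r->used == 0)
		return;
	r->used--;			/* request taken off the ring */
	while (p->count > 0 && ring_add(r))
		p->count--;		/* drain parked requests right away */
}

int main(void)
{
	struct ring r = { .used = 0 };
	struct pending p = { .count = 0 };

	for (int i = 0; i < RING_SIZE + 2; i++)
		submit(&r, &p, i);	/* last two requests get parked */
	printf("in flight: %d, parked: %d\n", r.used, p.count);

	complete_one(&r, &p);		/* frees a slot, refills at once */
	printf("in flight: %d, parked: %d\n", r.used, p.count);
	return 0;
}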

In our tests with a DPU-powered virtio-fs device and a queue depth
of 2x the virtio queue size, this reduces the 99.9th percentile
latencies by 60x and slightly increases the overall throughput.

Signed-off-by: Peter-Jan Gootzen <peter-...@gootzen.net>
---
 fs/fuse/virtio_fs.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 4d8d4f16c727..8af9d3dc61d3 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -347,6 +347,8 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
+
+       schedule_delayed_work(&fsvq->dispatch_work, 0);
 }
 
 static void virtio_fs_request_dispatch_work(struct work_struct *work)
@@ -388,8 +390,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
                        if (ret == -ENOMEM || ret == -ENOSPC) {
                                spin_lock(&fsvq->lock);
                                list_add_tail(&req->list, &fsvq->queued_reqs);
-                               schedule_delayed_work(&fsvq->dispatch_work,
-                                                     msecs_to_jiffies(1));
                                spin_unlock(&fsvq->lock);
                                return;
                        }
@@ -436,8 +436,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. 
Will try later\n",
                                 ret);
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
-                       schedule_delayed_work(&fsvq->dispatch_work,
-                                             msecs_to_jiffies(1));
                        if (!in_flight)
                                inc_in_flight_req(fsvq);
                        /* Queue is full */
@@ -647,6 +645,8 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
                        virtio_fs_request_complete(req, fsvq);
                }
        }
+
+       schedule_delayed_work(&fsvq->dispatch_work, 0);
 }
 
 /* Virtqueue interrupt handler */
@@ -1254,8 +1254,6 @@ __releases(fiq->lock)
                        spin_lock(&fsvq->lock);
                        list_add_tail(&req->list, &fsvq->queued_reqs);
                        inc_in_flight_req(fsvq);
-                       schedule_delayed_work(&fsvq->dispatch_work,
-                                               msecs_to_jiffies(1));
                        spin_unlock(&fsvq->lock);
                        return;
                }
-- 
2.34.1
