From: Mike Christie <michael.chris...@oracle.com>

This patchset allows userspace to map vqs to different workers. This
patch adds a worker pointer to the vq so in later patches in this set
we can queue/flush specific vqs and their workers.

Signed-off-by: Mike Christie <michael.chris...@oracle.com>
Message-Id: <20230626232307.97930-4-michael.chris...@oracle.com>
Signed-off-by: Michael S. Tsirkin <m...@redhat.com>

---------

(cherry picked from ms commit 737bdb643c4f)
https://virtuozzo.atlassian.net/browse/PSBM-152375
Signed-off-by: Andrey Zhadchenko <andrey.zhadche...@virtuozzo.com>
---
 drivers/vhost/vhost.c | 25 +++++++++++++++----------
 drivers/vhost/vhost.h |  1 +
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2dd032fbf642..c6054c1429c5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -333,6 +333,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
+       vq->worker = NULL;
        vhost_vring_call_reset(&vq->call_ctx);
        __vhost_vq_meta_reset(vq);
 }
@@ -585,7 +586,7 @@ static void vhost_worker_free(struct vhost_dev *dev)
        kfree(worker);
 }
 
-static int vhost_worker_create(struct vhost_dev *dev)
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
 {
        struct vhost_worker *worker;
        struct task_struct *task;
@@ -593,7 +594,7 @@ static int vhost_worker_create(struct vhost_dev *dev)
 
        worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
        if (!worker)
-               return -ENOMEM;
+               return NULL;
 
        dev->worker = worker;
        worker->dev = dev;
@@ -601,10 +602,8 @@ static int vhost_worker_create(struct vhost_dev *dev)
        init_llist_head(&worker->work_list);
 
        task = kthread_create(vhost_worker, worker, "vhost-%d", current->pid);
-       if (IS_ERR(task)) {
-               ret = PTR_ERR(task);
+       if (IS_ERR(task))
                goto free_worker;
-       }
 
        worker->task = task;
        wake_up_process(task); /* avoid contributing to loadavg */
@@ -613,14 +612,14 @@ static int vhost_worker_create(struct vhost_dev *dev)
        if (ret)
                goto stop_worker;
 
-       return 0;
+       return worker;
 
 stop_worker:
        kthread_stop(worker->task);
 free_worker:
        kfree(worker);
        dev->worker = NULL;
-       return ret;
+       return NULL;
 }
 
 static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
@@ -647,7 +646,8 @@ static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
-       int err;
+       struct vhost_worker *worker;
+       int err, i;
 
        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {
@@ -668,9 +668,14 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
                 * below since we don't have to worry about vsock queueing
                 * while we free the worker.
                 */
-               err = vhost_worker_create(dev);
-               if (err)
+               worker = vhost_worker_create(dev);
+               if (!worker) {
+                       err = -ENOMEM;
                        goto err_worker;
+               }
+
+               for (i = 0; i < dev->nvqs; i++)
+                       dev->vqs[i]->worker = worker;
        }
 
        return 0;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9ff8c0afcead..f1f168f4fa85 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -74,6 +74,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
+       struct vhost_worker *worker;
 
        /* The actual ring of buffers. */
        struct mutex mutex;
-- 
2.39.3

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to