Re: [PATCH v6 01/11] vhost: add vhost_worker pointer to vhost_virtqueue

2023-04-04 Thread Mike Christie
On 4/4/23 1:38 PM, Michael S. Tsirkin wrote:
> On Mon, Mar 27, 2023 at 09:17:07PM -0500, Mike Christie wrote:
>> This patchset allows userspace to map vqs to different workers. This
>> patch adds a worker pointer to the vq so we can store that info.
>>
>> Signed-off-by: Mike Christie 
> Thanks! Conflicts with a bunch of refactorings upstream:
> could you rebase this on my tree and repost?

I will.

> I need to queue this soon so it gets time in -next.
Are you shooting for 6.4?

I think it's ok to do this for 6.5. We are already at rc5 and to
handle Jason's issue I will need to do redo testing and that will
take me some time.
___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization


Re: [PATCH v6 01/11] vhost: add vhost_worker pointer to vhost_virtqueue

2023-04-04 Thread Michael S. Tsirkin
On Mon, Mar 27, 2023 at 09:17:07PM -0500, Mike Christie wrote:
> This patchset allows userspace to map vqs to different workers. This
> patch adds a worker pointer to the vq so we can store that info.
> 
> Signed-off-by: Mike Christie 

Thanks! Conflicts with a bunch of refactorings upstream:
could you rebase this on my tree and repost?
I need to queue this soon so it gets time in -next.

> ---
>  drivers/vhost/vhost.c | 24 +---
>  drivers/vhost/vhost.h |  1 +
>  2 files changed, 14 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 4368ee9b999c..e041e116afee 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -486,6 +486,7 @@ void vhost_dev_init(struct vhost_dev *dev,
>   vq->log = NULL;
>   vq->indirect = NULL;
>   vq->heads = NULL;
> + vq->worker = NULL;
>   vq->dev = dev;
>   mutex_init(&vq->mutex);
>   vhost_vq_reset(dev, vq);
> @@ -554,16 +555,15 @@ static void vhost_worker_free(struct vhost_dev *dev)
>   kfree(worker);
>  }
>  
> -static int vhost_worker_create(struct vhost_dev *dev)
> +static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
>  {
>   struct vhost_worker *worker;
>   struct vhost_task *vtsk;
>   char name[TASK_COMM_LEN];
> - int ret;
>  
>   worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
>   if (!worker)
> - return -ENOMEM;
> + return NULL;
>  
>   dev->worker = worker;
>   worker->kcov_handle = kcov_common_handle();
> @@ -571,25 +571,24 @@ static int vhost_worker_create(struct vhost_dev *dev)
>   snprintf(name, sizeof(name), "vhost-%d", current->pid);
>  
>   vtsk = vhost_task_create(vhost_worker, worker, name);
> - if (!vtsk) {
> - ret = -ENOMEM;
> + if (!vtsk)
>   goto free_worker;
> - }
>  
>   worker->vtsk = vtsk;
>   vhost_task_start(vtsk);
> - return 0;
> + return worker;
>  
>  free_worker:
>   kfree(worker);
>   dev->worker = NULL;
> - return ret;
> + return NULL;
>  }
>  
>  /* Caller should have device mutex */
>  long vhost_dev_set_owner(struct vhost_dev *dev)
>  {
> - int err;
> + struct vhost_worker *worker;
> + int err, i;
>  
>   /* Is there an owner already? */
>   if (vhost_dev_has_owner(dev)) {
> @@ -600,9 +599,12 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
>   vhost_attach_mm(dev);
>  
>   if (dev->use_worker) {
> - err = vhost_worker_create(dev);
> - if (err)
> + worker = vhost_worker_create(dev);
> + if (!worker)
>   goto err_worker;
> +
> + for (i = 0; i < dev->nvqs; i++)
> + dev->vqs[i]->worker = worker;
>   }
>  
>   err = vhost_dev_alloc_iovecs(dev);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 0308638cdeee..e72b665ba3a5 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -74,6 +74,7 @@ struct vhost_vring_call {
>  /* The virtqueue structure describes a queue attached to a device. */
>  struct vhost_virtqueue {
>   struct vhost_dev *dev;
> + struct vhost_worker *worker;
>  
>   /* The actual ring of buffers. */
>   struct mutex mutex;
> -- 
> 2.25.1

On Mon, Mar 27, 2023 at 09:17:08PM -0500, Mike Christie wrote:
> In the next patches each vq might have different workers so one could
> have work but others do not. For net, we only want to check specific vqs,
> so this adds a helper to check if a vq has work pending and converts
> vhost-net to use it.
> 
> Signed-off-by: Mike Christie 
> ---
>  drivers/vhost/net.c   | 2 +-
>  drivers/vhost/vhost.c | 6 +++---
>  drivers/vhost/vhost.h | 2 +-
>  3 files changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index 07181cd8d52e..8ed63651b9eb 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -546,7 +546,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
>   endtime = busy_clock() + busyloop_timeout;
>  
>   while (vhost_can_busy_poll(endtime)) {
> - if (vhost_has_work(&net->dev)) {
> + if (vhost_vq_has_work(vq)) {
>   *busyloop_intr = true;
>   break;
>   }
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index e041e116afee..6567aed69ebb 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -262,11 +262,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
>  EXPORT_SYMBOL_GPL(vhost_work_queue);
>  
>  /* A lockless hint for busy polling code to exit the loop */
> -bool vhost_has_work(struct vhost_dev *dev)
> +bool vhost_vq_has_work(struct vhost_virtqueue *vq)
>  {
> - return dev->worker && !llist_empty(&dev->worker->work_list);
> + return vq->worker && !llist_empty(&vq->worker->work_list);

Re: [PATCH v6 01/11] vhost: add vhost_worker pointer to vhost_virtqueue

2023-04-04 Thread Jason Wang
On Tue, Mar 28, 2023 at 10:17 AM Mike Christie
 wrote:
>
> This patchset allows userspace to map vqs to different workers. This
> patch adds a worker pointer to the vq so we can store that info.
>
> Signed-off-by: Mike Christie 

Acked-by: Jason Wang 

Thanks

> ---
>  drivers/vhost/vhost.c | 24 +---
>  drivers/vhost/vhost.h |  1 +
>  2 files changed, 14 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 4368ee9b999c..e041e116afee 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -486,6 +486,7 @@ void vhost_dev_init(struct vhost_dev *dev,
> vq->log = NULL;
> vq->indirect = NULL;
> vq->heads = NULL;
> +   vq->worker = NULL;
> vq->dev = dev;
> mutex_init(&vq->mutex);
> vhost_vq_reset(dev, vq);
> @@ -554,16 +555,15 @@ static void vhost_worker_free(struct vhost_dev *dev)
> kfree(worker);
>  }
>
> -static int vhost_worker_create(struct vhost_dev *dev)
> +static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
>  {
> struct vhost_worker *worker;
> struct vhost_task *vtsk;
> char name[TASK_COMM_LEN];
> -   int ret;
>
> worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> if (!worker)
> -   return -ENOMEM;
> +   return NULL;
>
> dev->worker = worker;
> worker->kcov_handle = kcov_common_handle();
> @@ -571,25 +571,24 @@ static int vhost_worker_create(struct vhost_dev *dev)
> snprintf(name, sizeof(name), "vhost-%d", current->pid);
>
> vtsk = vhost_task_create(vhost_worker, worker, name);
> -   if (!vtsk) {
> -   ret = -ENOMEM;
> +   if (!vtsk)
> goto free_worker;
> -   }
>
> worker->vtsk = vtsk;
> vhost_task_start(vtsk);
> -   return 0;
> +   return worker;
>
>  free_worker:
> kfree(worker);
> dev->worker = NULL;
> -   return ret;
> +   return NULL;
>  }
>
>  /* Caller should have device mutex */
>  long vhost_dev_set_owner(struct vhost_dev *dev)
>  {
> -   int err;
> +   struct vhost_worker *worker;
> +   int err, i;
>
> /* Is there an owner already? */
> if (vhost_dev_has_owner(dev)) {
> @@ -600,9 +599,12 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
> vhost_attach_mm(dev);
>
> if (dev->use_worker) {
> -   err = vhost_worker_create(dev);
> -   if (err)
> +   worker = vhost_worker_create(dev);
> +   if (!worker)
> goto err_worker;
> +
> +   for (i = 0; i < dev->nvqs; i++)
> +   dev->vqs[i]->worker = worker;
> }
>
> err = vhost_dev_alloc_iovecs(dev);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 0308638cdeee..e72b665ba3a5 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -74,6 +74,7 @@ struct vhost_vring_call {
>  /* The virtqueue structure describes a queue attached to a device. */
>  struct vhost_virtqueue {
> struct vhost_dev *dev;
> +   struct vhost_worker *worker;
>
> /* The actual ring of buffers. */
> struct mutex mutex;
> --
> 2.25.1
>

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

[PATCH v6 01/11] vhost: add vhost_worker pointer to vhost_virtqueue

2023-03-27 Thread Mike Christie
This patchset allows userspace to map vqs to different workers. This
patch adds a worker pointer to the vq so we can store that info.

Signed-off-by: Mike Christie 
---
 drivers/vhost/vhost.c | 24 +---
 drivers/vhost/vhost.h |  1 +
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4368ee9b999c..e041e116afee 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -486,6 +486,7 @@ void vhost_dev_init(struct vhost_dev *dev,
vq->log = NULL;
vq->indirect = NULL;
vq->heads = NULL;
+   vq->worker = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
@@ -554,16 +555,15 @@ static void vhost_worker_free(struct vhost_dev *dev)
kfree(worker);
 }
 
-static int vhost_worker_create(struct vhost_dev *dev)
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
 {
struct vhost_worker *worker;
struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
-   int ret;
 
worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
if (!worker)
-   return -ENOMEM;
+   return NULL;
 
dev->worker = worker;
worker->kcov_handle = kcov_common_handle();
@@ -571,25 +571,24 @@ static int vhost_worker_create(struct vhost_dev *dev)
snprintf(name, sizeof(name), "vhost-%d", current->pid);
 
vtsk = vhost_task_create(vhost_worker, worker, name);
-   if (!vtsk) {
-   ret = -ENOMEM;
+   if (!vtsk)
goto free_worker;
-   }
 
worker->vtsk = vtsk;
vhost_task_start(vtsk);
-   return 0;
+   return worker;
 
 free_worker:
kfree(worker);
dev->worker = NULL;
-   return ret;
+   return NULL;
 }
 
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
-   int err;
+   struct vhost_worker *worker;
+   int err, i;
 
/* Is there an owner already? */
if (vhost_dev_has_owner(dev)) {
@@ -600,9 +599,12 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
vhost_attach_mm(dev);
 
if (dev->use_worker) {
-   err = vhost_worker_create(dev);
-   if (err)
+   worker = vhost_worker_create(dev);
+   if (!worker)
goto err_worker;
+
+   for (i = 0; i < dev->nvqs; i++)
+   dev->vqs[i]->worker = worker;
}
 
err = vhost_dev_alloc_iovecs(dev);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 0308638cdeee..e72b665ba3a5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -74,6 +74,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
struct vhost_dev *dev;
+   struct vhost_worker *worker;
 
/* The actual ring of buffers. */
struct mutex mutex;
-- 
2.25.1

___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization