Re: [PATCH 1/2] vhost test module

2010-12-04 Thread Paul E. McKenney
On Thu, Dec 02, 2010 at 03:56:56PM -0800, Paul E. McKenney wrote:
 On Fri, Dec 03, 2010 at 01:18:18AM +0200, Michael S. Tsirkin wrote:
  On Thu, Dec 02, 2010 at 03:13:03PM -0800, Paul E. McKenney wrote:
   On Thu, Dec 02, 2010 at 09:47:09PM +0200, Michael S. Tsirkin wrote:
On Thu, Dec 02, 2010 at 11:26:16AM -0800, Paul E. McKenney wrote:
 On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
  On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
   On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
This adds a test module for vhost infrastructure.
Intentionally not tied to kbuild to prevent people
from installing and loading it accidentally.

Signed-off-by: Michael S. Tsirkin m...@redhat.com
   
   One question below.
   
---

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
new file mode 100644
index 000..099f302
--- /dev/null
+++ b/drivers/vhost/test.c
@@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
   
   Any chance of a check for running in a workqueue?  If I remember correctly,
   the ->lockdep_map field in the work_struct structure allows you to create
   the required lockdep expression.
  
  We moved away from using the workqueue to a custom kernel thread
  implementation though.
 
 OK, then could you please add a check for current == custom_kernel_thread
 or some such?
 
   Thanx, Paul

It's a bit tricky. The way we flush out things is by an analog of
flush_work. So just checking that we run from a workqueue isn't
right; we need to check that we are running from one of the specific work
structures that we are careful to flush.

I can do this by passing the work structure pointer on to relevant
functions but I think this will add (very minor) overhead even when rcu
checks are disabled. Does it matter? Thoughts?
   
   Would it be possible to set up separate lockdep maps, in effect passing
   the needed information via lockdep rather than via the function arguments?
  
  Possibly, I don't know enough about this but will check.
  Any examples to look at?
 
 I would suggest the workqueue example in include/linux/workqueue.h and
 kernel/workqueue.c.  You will need a struct lockdep_map for each of the
 specific work structures, sort of like the one in struct workqueue_struct.
 
 You can then use lock_map_acquire() and lock_map_release() to flag the
 fact that your kernel thread is running and not, and then you can pass
 lock_is_held() to rcu_dereference_check().  Each of lock_map_acquire(),
 lock_map_release(), and lock_is_held() takes a struct lockdep_map as
 sole argument.  The rcu_lock_map definition shows an example of a global
 lockdep_map variable.  If you need to do runtime initialization of your
 lockdep_map structure, you can use lockdep_init_map().
 
 Seem reasonable?

PS to previous...

Suppose that there are many work structures, and several of them use
this same function, but the function has no indication of which one it
is working on.  Then one reasonable approach is to share a lockdep_map
structure among them.  Although this does not allow you to validate
one-to-one from 
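
A minimal sketch of the shared-map idea described above (illustrative only;
the names shared_work_key, shared_work_map, and run_work are hypothetical,
and it assumes struct vhost_work exposes its handler as ->fn):

static struct lock_class_key shared_work_key;
static struct lockdep_map shared_work_map;      /* shared by all such works */

static void shared_work_map_init(void)
{
        lockdep_init_map(&shared_work_map, "vhost_test_work",
                         &shared_work_key, 0);
}

/* The worker wraps every invocation of the common handler the same way,
 * so lockdep can only confirm that *some* such work is running, not which
 * specific work structure it is. */
static void run_work(struct vhost_work *work)
{
        lock_map_acquire(&shared_work_map);
        work->fn(work);
        lock_map_release(&shared_work_map);
}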

Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Michael S. Tsirkin
On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
 On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
  This adds a test module for vhost infrastructure.
  Intentionally not tied to kbuild to prevent people
  from installing and loading it accidentally.
  
  Signed-off-by: Michael S. Tsirkin m...@redhat.com
 
 One question below.
 
  ---
  
  diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
  new file mode 100644
  index 000..099f302
  --- /dev/null
  +++ b/drivers/vhost/test.c
  @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
 
 Any chance of a check for running in a workqueue?  If I remember correctly,
 the ->lockdep_map field in the work_struct structure allows you to create
 the required lockdep expression.
 
   Thanx, Paul

We moved away from using the workqueue to a custom kernel thread
implementation though.



Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Paul E. McKenney
On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
 This adds a test module for vhost infrastructure.
 Intentionally not tied to kbuild to prevent people
 from installing and loading it accidentally.
 
 Signed-off-by: Michael S. Tsirkin m...@redhat.com

One question below.

 ---
 
 diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
 new file mode 100644
 index 000..099f302
 --- /dev/null
 +++ b/drivers/vhost/test.c
 @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);

Any chance of a check for running in a workqueue?  If I remember correctly,
the ->lockdep_map field in the work_struct structure allows you to create
the required lockdep expression.

Thanx, Paul


Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Paul E. McKenney
On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
 On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
  On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
   This adds a test module for vhost infrastructure.
   Intentionally not tied to kbuild to prevent people
   from installing and loading it accidentally.
   
   Signed-off-by: Michael S. Tsirkin m...@redhat.com
  
  One question below.
  
   ---
   
   diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
   new file mode 100644
   index 000..099f302
   --- /dev/null
   +++ b/drivers/vhost/test.c
   @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
  
  Any chance of a check for running in a workqueue?  If I remember correctly,
  the ->lockdep_map field in the work_struct structure allows you to create
  the required lockdep expression.
 
 We moved away from using the workqueue to a custom kernel thread
 implementation though.

OK, then could you please add a check for current == custom_kernel_thread
or some such?

Thanx, Paul
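
As an illustration of that check (sketch only: it assumes, hypothetically,
that the device records its worker task in a dev->worker field when the
custom kernel thread is created):

#include <linux/rcupdate.h>
#include <linux/sched.h>        /* for current */

static void *vq_get_private(struct vhost_virtqueue *vq)
{
        /* Legal only while the vhost worker thread itself is running us. */
        return rcu_dereference_check(vq->private_data,
                                     current == vq->dev->worker);
}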


Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Michael S. Tsirkin
On Thu, Dec 02, 2010 at 11:26:16AM -0800, Paul E. McKenney wrote:
 On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
  On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
   On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
This adds a test module for vhost infrastructure.
Intentionally not tied to kbuild to prevent people
from installing and loading it accidentally.

Signed-off-by: Michael S. Tsirkin m...@redhat.com
   
    One question below.
   
---

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
new file mode 100644
index 000..099f302
--- /dev/null
+++ b/drivers/vhost/test.c
@@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
   
    Any chance of a check for running in a workqueue?  If I remember correctly,
    the ->lockdep_map field in the work_struct structure allows you to create
    the required lockdep expression.
  
  We moved away from using the workqueue to a custom kernel thread
  implementation though.
 
 OK, then could you please add a check for current == custom_kernel_thread
 or some such?
 
   Thanx, Paul

It's a bit tricky. The way we flush out things is by an analog of
flush_work. So just checking that we run from a workqueue isn't
right; we need to check that we are running from one of the specific work
structures that we are careful to flush.

I can do this by passing the work structure pointer on to relevant
functions but I think this will add (very minor) overhead even when rcu
checks are disabled. Does it matter? Thoughts?
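
A sketch of the pointer-passing option described above (illustrative only;
vhost_work_running() is a hypothetical helper that would check the work item
against the structures the driver is careful to flush):

static void handle_vq(struct vhost_test *n, struct vhost_work *work)
{
        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
        void *private;

        private = rcu_dereference_check(vq->private_data,
                                        vhost_work_running(work));
        if (!private)
                return;
        /* ... remainder as in the patch ... */
}

static void handle_vq_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

        /* The extra argument is present even when RCU checking is disabled. */
        handle_vq(n, work);
}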


Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Paul E. McKenney
On Thu, Dec 02, 2010 at 09:47:09PM +0200, Michael S. Tsirkin wrote:
 On Thu, Dec 02, 2010 at 11:26:16AM -0800, Paul E. McKenney wrote:
  On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
   On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
 This adds a test module for vhost infrastructure.
 Intentionally not tied to kbuild to prevent people
 from installing and loading it accidentally.
 
 Signed-off-by: Michael S. Tsirkin m...@redhat.com

One question below.

 ---
 
 diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
 new file mode 100644
 index 000..099f302
 --- /dev/null
 +++ b/drivers/vhost/test.c
 @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);

Any chance of a check for running in a workqueue?  If I remember correctly,
the ->lockdep_map field in the work_struct structure allows you to create
the required lockdep expression.
   
   We moved away from using the workqueue to a custom kernel thread
   implementation though.
  
  OK, then could you please add a check for current == custom_kernel_thread
  or some such?
  
  Thanx, Paul
 
 It's a bit tricky. The way we flush out things is by an analog of
 flush_work. So just checking that we run from a workqueue isn't
 right; we need to check that we are running from one of the specific work
 structures that we are careful to flush.
 
 I can do this by passing the work structure pointer on to relevant
 functions but I think this will add (very minor) overhead even when rcu
 checks are disabled. Does it matter? Thoughts?

Would it be possible to set up separate lockdep maps, in effect passing
the needed information via lockdep rather than via the function arguments?

Thanx, Paul


Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Michael S. Tsirkin
On Thu, Dec 02, 2010 at 03:13:03PM -0800, Paul E. McKenney wrote:
 On Thu, Dec 02, 2010 at 09:47:09PM +0200, Michael S. Tsirkin wrote:
  On Thu, Dec 02, 2010 at 11:26:16AM -0800, Paul E. McKenney wrote:
   On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
 On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
  This adds a test module for vhost infrastructure.
  Intentionally not tied to kbuild to prevent people
  from installing and loading it accidentally.
  
  Signed-off-by: Michael S. Tsirkin m...@redhat.com
 
  One question below.
 
  ---
  
  diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
  new file mode 100644
  index 000..099f302
  --- /dev/null
  +++ b/drivers/vhost/test.c
  @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
 
 Any chance of a check for running in a workqueue?  If I remember correctly,
 the ->lockdep_map field in the work_struct structure allows you to create
 the required lockdep expression.

We moved away from using the workqueue to a custom kernel thread
implementation though.
   
   OK, then could you please add a check for current == custom_kernel_thread
   or some such?
   
 Thanx, Paul
  
  It's a bit tricky. The way we flush out things is by an analog of
  flush_work. So just checking that we run from a workqueue isn't
  right; we need to check that we are running from one of the specific work
  structures that we are careful to flush.
  
  I can do this by passing the work structure pointer on to relevant
  functions but I think this will add (very minor) overhead even when rcu
  checks are disabled. Does it matter? Thoughts?
 
 Would it be possible to set up separate lockdep maps, in effect passing
 the needed information via lockdep rather than via the function arguments?
 
   Thanx, Paul

Possibly, I don't know enough about this but will check.
Any examples to look at?

-- 
mST


Re: [PATCH 1/2] vhost test module

2010-12-02 Thread Paul E. McKenney
On Fri, Dec 03, 2010 at 01:18:18AM +0200, Michael S. Tsirkin wrote:
 On Thu, Dec 02, 2010 at 03:13:03PM -0800, Paul E. McKenney wrote:
  On Thu, Dec 02, 2010 at 09:47:09PM +0200, Michael S. Tsirkin wrote:
   On Thu, Dec 02, 2010 at 11:26:16AM -0800, Paul E. McKenney wrote:
On Thu, Dec 02, 2010 at 09:11:30PM +0200, Michael S. Tsirkin wrote:
 On Thu, Dec 02, 2010 at 11:00:37AM -0800, Paul E. McKenney wrote:
  On Mon, Nov 29, 2010 at 07:09:01PM +0200, Michael S. Tsirkin wrote:
   This adds a test module for vhost infrastructure.
   Intentionally not tied to kbuild to prevent people
   from installing and loading it accidentally.
   
   Signed-off-by: Michael S. Tsirkin m...@redhat.com
  
  One question below.
  
   ---
   
   diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
   new file mode 100644
   index 000..099f302
   --- /dev/null
   +++ b/drivers/vhost/test.c
   @@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
  
   Any chance of a check for running in a workqueue?  If I remember correctly,
   the ->lockdep_map field in the work_struct structure allows you to create
   the required lockdep expression.
 
 We moved away from using the workqueue to a custom kernel thread
 implementation though.

 OK, then could you please add a check for current == custom_kernel_thread
 or some such?

Thanx, Paul
   
    It's a bit tricky. The way we flush out things is by an analog of
    flush_work. So just checking that we run from a workqueue isn't
    right; we need to check that we are running from one of the specific work
    structures that we are careful to flush.
   
   I can do this by passing the work structure pointer on to relevant
   functions but I think this will add (very minor) overhead even when rcu
   checks are disabled. Does it matter? Thoughts?
  
  Would it be possible to set up separate lockdep maps, in effect passing
  the needed information via lockdep rather than via the function arguments?
 
 Possibly, I don't know enough about this but will check.
 Any examples to look at?

I would suggest the workqueue example in include/linux/workqueue.h and
kernel/workqueue.c.  You will need a struct lockdep_map for each of the
specific work structures, sort of like the one in struct workqueue_struct.

You can then use lock_map_acquire() and lock_map_release() to flag the
fact that your kernel thread is running and not, and then you can pass
lock_is_held() to rcu_dereference_check().  Each of lock_map_acquire(),
lock_map_release(), and lock_is_held() takes a struct lockdep_map as
sole argument.  The rcu_lock_map definition shows an example of a global
lockdep_map variable.  If you need to do runtime initialization of your
lockdep_map structure, you can use lockdep_init_map().

Seem reasonable?

Thanx, Paul
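
A minimal sketch of the lockdep_map approach outlined above (illustrative
only; vhost_test_work_key, vhost_test_work_map, and vhost_work_run() are
hypothetical names, and it assumes struct vhost_work exposes its handler
as ->fn):

#include <linux/lockdep.h>
#include <linux/rcupdate.h>

static struct lock_class_key vhost_test_work_key;
static struct lockdep_map vhost_test_work_map;  /* one per flushed work */

static void vhost_test_lockdep_init(void)
{
        lockdep_init_map(&vhost_test_work_map, "vhost_test_work",
                         &vhost_test_work_key, 0);
}

/* The custom worker thread brackets each handler invocation so lockdep
 * knows when the work is running. */
static void vhost_work_run(struct vhost_work *work)
{
        lock_map_acquire(&vhost_test_work_map);
        work->fn(work);                         /* e.g. handle_vq_kick() */
        lock_map_release(&vhost_test_work_map);
}

/* The handler can then tell RCU exactly how the access is protected. */
static void *vq_get_private(struct vhost_virtqueue *vq)
{
        return rcu_dereference_check(vq->private_data,
                                     lock_is_held(&vhost_test_work_map));
}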


[PATCH 1/2] vhost test module

2010-11-29 Thread Michael S. Tsirkin
This adds a test module for vhost infrastructure.
Intentionally not tied to kbuild to prevent people
from installing and loading it accidentally.

Signed-off-by: Michael S. Tsirkin m...@redhat.com

---

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
new file mode 100644
index 000..099f302
--- /dev/null
+++ b/drivers/vhost/test.c
@@ -0,0 +1,320 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin m...@redhat.com
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * test virtio server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+
+#include "test.h"
+#include "vhost.c"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_TEST_WEIGHT 0x8
+
+enum {
+        VHOST_TEST_VQ = 0,
+        VHOST_TEST_VQ_MAX = 1,
+};
+
+struct vhost_test {
+        struct vhost_dev dev;
+        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+};
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_vq(struct vhost_test *n)
+{
+        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+        unsigned out, in;
+        int head;
+        size_t len, total_len = 0;
+        void *private;
+
+        private = rcu_dereference_check(vq->private_data, 1);
+        if (!private)
+                return;
+
+        mutex_lock(&vq->mutex);
+        vhost_disable_notify(vq);
+
+        for (;;) {
+                head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
+                                         ARRAY_SIZE(vq->iov),
+                                         &out, &in,
+                                         NULL, NULL);
+                /* On error, stop handling until the next kick. */
+                if (unlikely(head < 0))
+                        break;
+                /* Nothing new?  Wait for eventfd to tell us they refilled. */
+                if (head == vq->num) {
+                        if (unlikely(vhost_enable_notify(vq))) {
+                                vhost_disable_notify(vq);
+                                continue;
+                        }
+                        break;
+                }
+                if (in) {
+                        vq_err(vq, "Unexpected descriptor format for TX: "
+                               "out %d, int %d\n", out, in);
+                        break;
+                }
+                len = iov_length(vq->iov, out);
+                /* Sanity check */
+                if (!len) {
+                        vq_err(vq, "Unexpected 0 len for TX\n");
+                        break;
+                }
+                vhost_add_used_and_signal(&n->dev, vq, head, 0);
+                total_len += len;
+                if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
+                        vhost_poll_queue(&vq->poll);
+                        break;
+                }
+        }
+
+        mutex_unlock(&vq->mutex);
+}
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+                                                  poll.work);
+        struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
+
+        handle_vq(n);
+}
+
+static int vhost_test_open(struct inode *inode, struct file *f)
+{
+        struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
+        struct vhost_dev *dev;
+        int r;
+
+        if (!n)
+                return -ENOMEM;
+
+        dev = &n->dev;
+        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
+        r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+        if (r < 0) {
+                kfree(n);
+                return r;
+        }
+
+        f->private_data = n;
+
+        return 0;
+}
+
+static void *vhost_test_stop_vq(struct vhost_test *n,
+                                struct vhost_virtqueue *vq)
+{
+        void *private;
+
+        mutex_lock(&vq->mutex);
+        private = rcu_dereference_protected(vq->private_data,
+                                            lockdep_is_held(&vq->mutex));
+        rcu_assign_pointer(vq->private_data, NULL);
+        mutex_unlock(&vq->mutex);
+        return private;
+}
+
+static void vhost_test_stop(struct vhost_test *n, void **privatep)
+{
+        *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
+}
+
+static void vhost_test_flush_vq(struct vhost_test *n, int index)
+{
+        vhost_poll_flush(&n->dev.vqs[index].poll);
+}
+
+static void vhost_test_flush(struct vhost_test *n)
+{
+        vhost_test_flush_vq(n, VHOST_TEST_VQ);
+}
+
+static int vhost_test_release(struct inode *inode, struct file *f)
+{
+        struct vhost_test *n = f->private_data;
+        void *private;
+
+        vhost_test_stop(n, &private);