Re: [PATCH 4/4] vhost-net: Cleanup vhost_ubuf adn vhost_zcopy

2013-05-06 Thread Michael S. Tsirkin
Typo s/adn/and/

On Fri, May 03, 2013 at 02:25:18PM +0800, Asias He wrote:
 - Rename vhost_ubuf to vhost_net_ubuf
 - Rename vhost_zcopy_mask to vhost_net_zcopy_mask
 - Make funcs static
 
 Signed-off-by: Asias He <as...@redhat.com>
 ---
  drivers/vhost/net.c | 58 +++--
  1 file changed, 30 insertions(+), 28 deletions(-)
 
 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
 index eb73217..4548c0b 100644
 --- a/drivers/vhost/net.c
 +++ b/drivers/vhost/net.c
 @@ -70,7 +70,7 @@ enum {
   VHOST_NET_VQ_MAX = 2,
  };
  
 -struct vhost_ubuf_ref {
 +struct vhost_net_ubuf_ref {
   struct kref kref;
   wait_queue_head_t wait;
   struct vhost_virtqueue *vq;
 @@ -93,7 +93,7 @@ struct vhost_net_virtqueue {
   struct ubuf_info *ubuf_info;
   /* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
 - struct vhost_ubuf_ref *ubufs;
 + struct vhost_net_ubuf_ref *ubufs;
  };
  
  struct vhost_net {
 @@ -110,24 +110,25 @@ struct vhost_net {
   bool tx_flush;
  };
  
 -static unsigned vhost_zcopy_mask __read_mostly;
 +static unsigned vhost_net_zcopy_mask __read_mostly;
  
 -void vhost_enable_zcopy(int vq)
 +static void vhost_net_enable_zcopy(int vq)
  {
 - vhost_zcopy_mask |= 0x1 << vq;
 + vhost_net_zcopy_mask |= 0x1 << vq;
  }
  
 -static void vhost_zerocopy_done_signal(struct kref *kref)
 +static void vhost_net_zerocopy_done_signal(struct kref *kref)
  {
 - struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
 - kref);
 + struct vhost_net_ubuf_ref *ubufs;
 +
 + ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
   wake_up(&ubufs->wait);
  }
  
 -struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
 - bool zcopy)
 +static struct vhost_net_ubuf_ref *
 +vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
  {
 - struct vhost_ubuf_ref *ubufs;
 + struct vhost_net_ubuf_ref *ubufs;
   /* No zero copy backend? Nothing to count. */
   if (!zcopy)
   return NULL;
 @@ -140,14 +141,14 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
   return ubufs;
  }
  
 -void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
 +static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
  {
 - kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
 + kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
  }
  
 -void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 +static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
  {
 - kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
 + kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
   wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
   kfree(ubufs);
  }
 @@ -159,7 +160,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
   int i;
  
   for (i = 0; i < n->dev.nvqs; ++i) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (zcopy)
   kfree(n->vqs[i].ubuf_info);
   }
 @@ -171,7 +172,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
   int i;
  
   for (i = 0; i < n->dev.nvqs; ++i) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (!zcopy)
   continue;
   n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
 @@ -183,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
  
  err:
   while (i--) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (!zcopy)
   continue;
   kfree(n->vqs[i].ubuf_info);
 @@ -305,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
  
  static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  {
 - struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 + struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
   struct vhost_virtqueue *vq = ubufs->vq;
   int cnt = atomic_read(&ubufs->kref.refcount);
  
 @@ -322,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
   /* set len to mark this desc buffers done DMA */
   vq->heads[ubuf->desc].len = success ?
   VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
 - vhost_ubuf_put(ubufs);
 + vhost_net_ubuf_put(ubufs);
  }
  
  /* Expects to be always run from workqueue - which acts as
 @@ -345,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
   int err;
   size_t hdr_size;
   struct socket *sock;
 - struct vhost_ubuf_ref *uninitialized_var(ubufs);
 + struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
   bool zcopy, zcopy_used;
  
   /* TODO: check that we are 

Re: [PATCH 4/4] vhost-net: Cleanup vhost_ubuf adn vhost_zcopy

2013-05-06 Thread Asias He
On Mon, May 6, 2013 at 4:17 PM, Michael S. Tsirkin <m...@redhat.com> wrote:
 Typo s/adn/and/

Yes. Caught this and fixed it already.


 On Fri, May 03, 2013 at 02:25:18PM +0800, Asias He wrote:
 - Rename vhost_ubuf to vhost_net_ubuf
 - Rename vhost_zcopy_mask to vhost_net_zcopy_mask
 - Make funcs static

 Signed-off-by: Asias He <as...@redhat.com>
 ---
  drivers/vhost/net.c | 58 +++--
  1 file changed, 30 insertions(+), 28 deletions(-)

 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
 index eb73217..4548c0b 100644
 --- a/drivers/vhost/net.c
 +++ b/drivers/vhost/net.c
 @@ -70,7 +70,7 @@ enum {
   VHOST_NET_VQ_MAX = 2,
  };

 -struct vhost_ubuf_ref {
 +struct vhost_net_ubuf_ref {
   struct kref kref;
   wait_queue_head_t wait;
   struct vhost_virtqueue *vq;
 @@ -93,7 +93,7 @@ struct vhost_net_virtqueue {
   struct ubuf_info *ubuf_info;
   /* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
 - struct vhost_ubuf_ref *ubufs;
 + struct vhost_net_ubuf_ref *ubufs;
  };

  struct vhost_net {
 @@ -110,24 +110,25 @@ struct vhost_net {
   bool tx_flush;
  };

 -static unsigned vhost_zcopy_mask __read_mostly;
 +static unsigned vhost_net_zcopy_mask __read_mostly;

 -void vhost_enable_zcopy(int vq)
 +static void vhost_net_enable_zcopy(int vq)
  {
 - vhost_zcopy_mask |= 0x1 << vq;
 + vhost_net_zcopy_mask |= 0x1 << vq;
  }

 -static void vhost_zerocopy_done_signal(struct kref *kref)
 +static void vhost_net_zerocopy_done_signal(struct kref *kref)
  {
 - struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
 - kref);
 + struct vhost_net_ubuf_ref *ubufs;
 +
 + ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
   wake_up(&ubufs->wait);
  }

 -struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
 - bool zcopy)
 +static struct vhost_net_ubuf_ref *
 +vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
  {
 - struct vhost_ubuf_ref *ubufs;
 + struct vhost_net_ubuf_ref *ubufs;
   /* No zero copy backend? Nothing to count. */
   if (!zcopy)
   return NULL;
 @@ -140,14 +141,14 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
   return ubufs;
  }

 -void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
 +static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
  {
 - kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
 + kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
  }

 -void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 +static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
  {
 - kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
 + kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
   wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
   kfree(ubufs);
  }
 @@ -159,7 +160,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
   int i;

   for (i = 0; i < n->dev.nvqs; ++i) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (zcopy)
   kfree(n->vqs[i].ubuf_info);
   }
 @@ -171,7 +172,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
   int i;

   for (i = 0; i < n->dev.nvqs; ++i) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (!zcopy)
   continue;
   n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
 @@ -183,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)

  err:
   while (i--) {
 - zcopy = vhost_zcopy_mask & (0x1 << i);
 + zcopy = vhost_net_zcopy_mask & (0x1 << i);
   if (!zcopy)
   continue;
   kfree(n->vqs[i].ubuf_info);
 @@ -305,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,

  static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  {
 - struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 + struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
   struct vhost_virtqueue *vq = ubufs->vq;
   int cnt = atomic_read(&ubufs->kref.refcount);

 @@ -322,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
   /* set len to mark this desc buffers done DMA */
   vq->heads[ubuf->desc].len = success ?
   VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
 - vhost_ubuf_put(ubufs);
 + vhost_net_ubuf_put(ubufs);
  }

  /* Expects to be always run from workqueue - which acts as
 @@ -345,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
   int err;
   size_t hdr_size;
   struct socket *sock;
 - struct vhost_ubuf_ref *uninitialized_var(ubufs);
 + struct vhost_net_ubuf_ref 

[PATCH 4/4] vhost-net: Cleanup vhost_ubuf adn vhost_zcopy

2013-05-03 Thread Asias He
- Rename vhost_ubuf to vhost_net_ubuf
- Rename vhost_zcopy_mask to vhost_net_zcopy_mask
- Make funcs static

Signed-off-by: Asias He <as...@redhat.com>
---
 drivers/vhost/net.c | 58 +++--
 1 file changed, 30 insertions(+), 28 deletions(-)
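
A note for readers following the rename: vhost_net_ubuf_alloc/put/put_and_wait wrap a plain
"last reference wakes the waiter, then the struct is freed" scheme around the outstanding
zero-copy buffers. Below is a minimal userspace sketch of that put-and-wait idea, using
pthreads instead of the kernel's kref/wait_event; the ubuf_ref_* names and the main()
harness are made up for illustration and are not part of this patch.

/* Userspace sketch of the put-and-wait refcount pattern behind
 * vhost_net_ubuf_put_and_wait(); illustrative only, not kernel code.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdlib.h>

struct ubuf_ref {
	int count;                      /* 1 for the device + 1 per in-flight buffer */
	pthread_mutex_t lock;
	pthread_cond_t drained;
};

static struct ubuf_ref *ubuf_ref_alloc(void)
{
	struct ubuf_ref *r = malloc(sizeof(*r));

	if (!r)
		return NULL;
	r->count = 1;                   /* like kref_init() */
	pthread_mutex_init(&r->lock, NULL);
	pthread_cond_init(&r->drained, NULL);
	return r;
}

static void ubuf_ref_put(struct ubuf_ref *r)
{
	pthread_mutex_lock(&r->lock);
	if (--r->count == 0)            /* like kref_put() reaching zero: wake the waiter */
		pthread_cond_broadcast(&r->drained);
	pthread_mutex_unlock(&r->lock);
}

static void ubuf_ref_put_and_wait(struct ubuf_ref *r)
{
	pthread_mutex_lock(&r->lock);
	--r->count;                     /* drop our own reference ... */
	while (r->count != 0)           /* ... then wait until every completion dropped theirs */
		pthread_cond_wait(&r->drained, &r->lock);
	pthread_mutex_unlock(&r->lock);
	/* the kernel helper kfree()s the struct at this point; here the caller frees it */
}

static void *completion_thread(void *arg)
{
	ubuf_ref_put(arg);              /* stands in for the zero-copy completion callback */
	return NULL;
}

int main(void)
{
	struct ubuf_ref *r = ubuf_ref_alloc();
	pthread_t t;

	if (!r)
		return 1;
	pthread_mutex_lock(&r->lock);
	r->count++;                     /* one in-flight buffer, like kref_get() */
	pthread_mutex_unlock(&r->lock);
	pthread_create(&t, NULL, completion_thread, r);
	ubuf_ref_put_and_wait(r);       /* blocks until the completion has run */
	pthread_join(t, NULL);
	free(r);
	return 0;
}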

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index eb73217..4548c0b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,7 @@ enum {
VHOST_NET_VQ_MAX = 2,
 };
 
-struct vhost_ubuf_ref {
+struct vhost_net_ubuf_ref {
struct kref kref;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
@@ -93,7 +93,7 @@ struct vhost_net_virtqueue {
struct ubuf_info *ubuf_info;
/* Reference counting for outstanding ubufs.
 * Protected by vq mutex. Writers must also take device mutex. */
-   struct vhost_ubuf_ref *ubufs;
+   struct vhost_net_ubuf_ref *ubufs;
 };
 
 struct vhost_net {
@@ -110,24 +110,25 @@ struct vhost_net {
bool tx_flush;
 };
 
-static unsigned vhost_zcopy_mask __read_mostly;
+static unsigned vhost_net_zcopy_mask __read_mostly;
 
-void vhost_enable_zcopy(int vq)
+static void vhost_net_enable_zcopy(int vq)
 {
-   vhost_zcopy_mask |= 0x1 << vq;
+   vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_zerocopy_done_signal(struct kref *kref)
+static void vhost_net_zerocopy_done_signal(struct kref *kref)
 {
-   struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
-   kref);
+   struct vhost_net_ubuf_ref *ubufs;
+
+   ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
	wake_up(&ubufs->wait);
 }
 
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
-   bool zcopy)
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
-   struct vhost_ubuf_ref *ubufs;
+   struct vhost_net_ubuf_ref *ubufs;
/* No zero copy backend? Nothing to count. */
if (!zcopy)
return NULL;
@@ -140,14 +141,14 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
return ubufs;
 }
 
-void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-   kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+   kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 }
 
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-   kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+   kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
kfree(ubufs);
 }
@@ -159,7 +160,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
int i;
 
	for (i = 0; i < n->dev.nvqs; ++i) {
-   zcopy = vhost_zcopy_mask & (0x1 << i);
+   zcopy = vhost_net_zcopy_mask & (0x1 << i);
	if (zcopy)
	kfree(n->vqs[i].ubuf_info);
}
@@ -171,7 +172,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
int i;
 
	for (i = 0; i < n->dev.nvqs; ++i) {
-   zcopy = vhost_zcopy_mask & (0x1 << i);
+   zcopy = vhost_net_zcopy_mask & (0x1 << i);
	if (!zcopy)
	continue;
	n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
@@ -183,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 
 err:
while (i--) {
-   zcopy = vhost_zcopy_mask & (0x1 << i);
+   zcopy = vhost_net_zcopy_mask & (0x1 << i);
	if (!zcopy)
	continue;
	kfree(n->vqs[i].ubuf_info);
@@ -305,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
 
 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
-   struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+   struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt = atomic_read(&ubufs->kref.refcount);
 
@@ -322,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-   vhost_ubuf_put(ubufs);
+   vhost_net_ubuf_put(ubufs);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -345,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
int err;
size_t hdr_size;
struct socket *sock;
-   struct vhost_ubuf_ref *uninitialized_var(ubufs);
+   struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
 
/* TODO: check that we are running from vhost_worker? */
@@ -441,7 +442,7 @@ static void handle_tx(struct vhost_net
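
One more aside on the zcopy mask rename: vhost_net_zcopy_mask is just a per-virtqueue bit,
set once when zero copy is enabled for a queue and tested wherever ubuf_info is allocated or
freed. A self-contained sketch of that enable/test pattern follows; the enable_zcopy and
zcopy_enabled helpers and the main() harness are illustrative only, not code from net.c.

/* Userspace sketch of the per-virtqueue zero-copy bitmask.  The indices
 * mirror VHOST_NET_VQ_RX = 0 and VHOST_NET_VQ_TX = 1; everything else is
 * made up for illustration. */
#include <stdio.h>

#define VQ_RX 0
#define VQ_TX 1

static unsigned int zcopy_mask;

static void enable_zcopy(int vq)
{
	zcopy_mask |= 0x1 << vq;               /* mark this virtqueue zero-copy capable */
}

static int zcopy_enabled(int vq)
{
	return !!(zcopy_mask & (0x1 << vq));   /* same test the patch renames in net.c */
}

int main(void)
{
	enable_zcopy(VQ_TX);                   /* vhost-net enables zero copy for TX only */
	printf("rx zcopy: %d, tx zcopy: %d\n",
	       zcopy_enabled(VQ_RX), zcopy_enabled(VQ_TX));
	return 0;
}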