Re: [PATCH v2] virtio-net: fill only rx queues which are being used

2013-04-28 Thread Rusty Russell
Sasha Levin  writes:
> Due to MQ support we may allocate a whole bunch of rx queues but
> never use them. With this patch we'll save the space used by
> the receive buffers until they are actually in use:

Thanks, applied!

Cheers,
Rusty.



[PATCH v2] virtio-net: fill only rx queues which are being used

2013-04-23 Thread Sasha Levin
Due to MQ support we may allocate a whole bunch of rx queues but
never use them. With this patch we'll save the space used by
the receive buffers until they are actually in use:

sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M        35M       455M         0B         0B       4.1M
-/+ buffers/cache:         31M       459M
Swap:           0B         0B         0B
sh-4.2# ethtool -L eth0 combined 8
sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M       162M       327M         0B         0B       4.1M
-/+ buffers/cache:        158M       331M
Swap:           0B         0B         0B

Signed-off-by: Sasha Levin 
---
 drivers/net/virtio_net.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6bfc511..196e721 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -581,7 +581,7 @@ static void refill_work(struct work_struct *work)
 	bool still_empty;
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
@@ -636,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		/* Make sure we have some buffers: if oom use wq. */
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
@@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	struct scatterlist sg;
 	struct virtio_net_ctrl_mq s;
 	struct net_device *dev = vi->dev;
+	int i;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -912,8 +913,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
-	} else
+	} else {
+		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
+			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+				schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+	}
 
 	return 0;
 }
@@ -1568,7 +1573,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
@@ -1692,7 +1697,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
 	netif_device_attach(vi->dev);
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->curr_queue_pairs; i++)
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
 
-- 
1.8.2.1
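
For context, the "ethtool -L eth0 combined 8" command in the example above
reaches virtnet_set_queues() through the driver's ethtool .set_channels hook.
The sketch below is a simplified illustration of that path only (locking,
CPU-affinity updates and some validation are left out, and the exact upstream
code differs); with this patch applied, enabling more queue pairs is also the
point where the newly used rx queues get their receive buffers filled.

/*
 * Simplified sketch, not the exact code in drivers/net/virtio_net.c:
 * how "ethtool -L <dev> combined N" ends up calling virtnet_set_queues().
 */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* virtio-net only supports "combined" rx/tx channel pairs. */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs == 0 || queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	/*
	 * With this patch, virtnet_set_queues() also fills the receive
	 * buffers of queue pairs that are being enabled for the first
	 * time, either directly or via the deferred refill work.
	 */
	err = virtnet_set_queues(vi, queue_pairs);
	if (err)
		return err;

	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);

	return 0;
}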
