Re: [PATCH] ibmveth change buffer pools dynamically

2006-05-25 Thread Jeff Garzik

Santiago Leon wrote:

Jeff,

Can you consider applying this patch?  I haven't received any feedback 
from netdev, but the changes are pretty straightforward (the majority of 
the patch is setting up the sysfs interface).


It's already in netdev-2.6.git#upstream...




[PATCH] ibmveth change buffer pools dynamically

2006-05-25 Thread Santiago Leon

Jeff,

Can you consider applying this patch?  I haven't received any feedback 
from netdev, but the changes are pretty straightforward (the majority of 
the patch is setting up the sysfs interface).



This patch provides a sysfs interface to change some properties of the 
ibmveth buffer pools (size of the buffers, number of buffers per pool, 
and whether a pool is active).  Ethernet drivers normally use ethtool to 
provide this type of functionality.  However, the buffers in the ibmveth 
driver can have an arbitrary size (not only regular, mini, and jumbo, 
which are the only sizes that ethtool can change), and ibmveth can also 
have an arbitrary number of buffer pools.
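
For context, ethtool's ring interface (ETHTOOL_GRINGPARAM) only describes 
the regular, mini, and jumbo receive rings, so it has no way to express 
five pools with arbitrary buffer sizes.  A small stand-alone C program 
(illustrative only, not part of the patch) shows exactly what that 
interface can report:

/* Illustrative user-space sketch: query the ring parameters ethtool
 * exposes for a device.  ETHTOOL_GRINGPARAM only knows about the regular,
 * mini and jumbo rx rings, so it cannot describe N pools of arbitrary
 * buffer sizes. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ring;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	/* only three receive ring classes exist in this interface */
	printf("rx: %u/%u  rx_mini: %u/%u  rx_jumbo: %u/%u\n",
	       ring.rx_pending, ring.rx_max_pending,
	       ring.rx_mini_pending, ring.rx_mini_max_pending,
	       ring.rx_jumbo_pending, ring.rx_jumbo_max_pending);

	close(fd);
	return 0;
}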


Under heavy load we have seen dropped packets, which obviously kills TCP 
performance.  We have created several fixes that mitigate this issue, 
but we definitely need a way of changing the number of buffers for an 
adapter dynamically.  Also, changing the size of the buffers allows 
users to change the MTU to something big (bigger than a jumbo frame), 
greatly improving performance on partition-to-partition transfers.
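
For reference, the constraint behind the MTU point is simply that a 
receive buffer must hold the new MTU plus the 22-byte per-buffer overhead 
(IBMVETH_BUFF_OH in the header below).  A stand-alone sketch of that 
arithmetic, using this patch's default pool buffer sizes (the helper name 
is made up for illustration):

/* Stand-alone sketch (not driver code): which of the default pool buffer
 * sizes can back a given MTU, given the 22-byte per-buffer overhead? */
#include <stdio.h>

#define IBMVETH_BUFF_OH 22	/* 14 ethernet header + 8 opaque handle */

static const int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };

/* hypothetical helper: can buffers of buff_size bytes carry new_mtu? */
static int mtu_fits_buffer(int new_mtu, int buff_size)
{
	return new_mtu + IBMVETH_BUFF_OH <= buff_size;
}

int main(void)
{
	/* 65514 = 64K - 22, the largest MTU a 64KB buffer can carry */
	int mtu[] = { 1500, 9000, 65514 };
	size_t i, j;

	for (i = 0; i < sizeof(mtu) / sizeof(mtu[0]); i++) {
		printf("MTU %5d needs at least %5d-byte buffers:",
		       mtu[i], mtu[i] + IBMVETH_BUFF_OH);
		for (j = 0; j < sizeof(pool_size) / sizeof(pool_size[0]); j++)
			if (mtu_fits_buffer(mtu[i], pool_size[j]))
				printf(" %d", pool_size[j]);
		printf("\n");
	}
	return 0;
}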


The patch creates directories pool1...pool4 in the device directory in 
sysfs, each with files: num, size, and active (which default to the 
values in the mainline version).
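
As a usage illustration only (the files are as described above, but the 
device path and the exact write sequence are assumptions, not taken from 
the patch), resizing a pool from user space could look like this:

/* Illustrative sketch: deactivate a pool, change its buffer size and
 * count, then activate it again through the described sysfs files.
 * The pool directory is passed on the command line because the exact
 * sysfs path depends on the platform. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *pool_dir, const char *attr, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), "%s/%s", pool_dir, attr);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <pool-sysfs-dir>\n", argv[0]);
		return 1;
	}
	/* assumed sequence: take the pool offline before resizing it */
	if (write_attr(argv[1], "active", "0") ||
	    write_attr(argv[1], "size", "65536") ||
	    write_attr(argv[1], "num", "256") ||
	    write_attr(argv[1], "active", "1"))
		return 1;
	return 0;
}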


Signed-off-by: Santiago Leon <[EMAIL PROTECTED]>
--
 ibmveth.c |  211 +-
 ibmveth.h |7 +-
 2 files changed, 174 insertions(+), 44 deletions(-)

--- a/drivers/net/ibmveth.h 2006-01-02 21:21:10.0 -0600
+++ b/drivers/net/ibmveth.h 2006-04-18 10:20:00.102520432 -0500
@@ -75,10 +75,13 @@
 
 #define IbmVethNumBufferPools 5
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
-/* pool_size should be sorted */
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
 dma_addr_t *dma_addr;
 struct sk_buff **skbuff;
 int active;
+struct kobject kobj;
 };
 
 struct ibmveth_rx_q {
@@ -118,6 +122,7 @@ struct ibmveth_adapter {
 dma_addr_t filter_list_dma;
 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
 struct ibmveth_rx_q rx_queue;
+int pool_config;
 
 /* adapter specific stats */
 u64 replenish_task_cycles;
--- a/drivers/net/ibmveth.c 2006-01-02 21:21:10.0 -0600
+++ b/drivers/net/ibmveth.c 2006-04-18 10:19:55.624532480 -0500
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapte
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_leng
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
pool->size = pool_size;
pool->index = pool_index;
pool->buff_size = buff_size;
pool->threshold = pool_size / 2;
+   pool->active = pool_active;
 }
 
 /* allocate and setup an buffer pool - called during open */
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(str
atomic_set(&pool->available, 0);
pool->producer_index = 0;
pool->consumer_index = 0;
-   pool->active = 0;
 
return 0;
 }
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(str
kfree(pool->skbuff);
pool->skbuff = NULL;
}
-   pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmve
}
 
 	for(i = 0; i<IbmVethNumBufferPools; i++)
-		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter, 
+						 &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_devic
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
 
-   /* call change_mtu to init the buffer pools based in initial mtu */
-   ibmveth_change_mtu(netdev, netdev->mtu);
-
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
 
@@ -522,6 +521,17 @@ static int ibmveth_open(struct net_devic
retur

Re: [PATCH] ibmveth change buffer pools dynamically

2006-04-28 Thread Santiago Leon

Santiago Leon wrote:


This patch provides a sysfs interface to change some properties of the
ibmveth buffer pools (size of the buffers, number of buffers per pool,
and whether a pool is active).  Ethernet drivers normally use ethtool to
provide this type of functionality.  However, the buffers in the ibmveth
driver can have an arbitrary size (not only regular, mini, and jumbo,
which are the only sizes that ethtool can change), and ibmveth can also
have an arbitrary number of buffer pools.


Under heavy load we have seen dropped packets, which obviously kills TCP
performance.  We have created several fixes that mitigate this issue,
but we definitely need a way of changing the number of buffers for an
adapter dynamically.  Also, changing the size of the buffers allows
users to change the MTU to something big (bigger than a jumbo frame),
greatly improving performance on partition-to-partition transfers.

The patch creates directories pool1...pool4 in the device directory in
sysfs, each with files: num, size, and active (which default to the
values in the mainline version).

Comments and suggestions are welcome...



Jeff, if you don't have any problem with this patch, can you apply it?

Thanks,

--
Santiago A. Leon
Power Linux Development
IBM Linux Technology Center



[PATCH] ibmveth change buffer pools dynamically

2006-04-25 Thread Santiago Leon
This patch provides a sysfs interface to change some properties of the
ibmveth buffer pools (size of the buffers, number of buffers per pool,
and whether a pool is active).  Ethernet drivers normally use ethtool to
provide this type of functionality.  However, the buffers in the ibmveth
driver can have an arbitrary size (not only regular, mini, and jumbo,
which are the only sizes that ethtool can change), and ibmveth can also
have an arbitrary number of buffer pools.

Under heavy load we have seen dropped packets, which obviously kills TCP
performance.  We have created several fixes that mitigate this issue,
but we definitely need a way of changing the number of buffers for an
adapter dynamically.  Also, changing the size of the buffers allows
users to change the MTU to something big (bigger than a jumbo frame),
greatly improving performance on partition-to-partition transfers.

The patch creates directories pool1...pool4 in the device directory in
sysfs, each with files: num, size, and active (which default to the
values in the mainline version).
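
The diff below adds a struct kobject to each buffer pool and declares a 
ktype_veth_pool kobj_type, but the truncated hunks do not show how the 
num/size/active files are wired up.  The following is a rough sketch of 
the classic kobject + sysfs_ops pattern such a layout implies; the 
attribute names come from the description above, while everything else 
(including the use of default_attrs) is illustrative and not taken from 
the patch:

/*
 * Rough sketch only -- NOT the patch's actual implementation.  It shows
 * one way a per-pool "num"/"size"/"active" sysfs directory could be
 * wired up with a kobj_type, assuming the struct ibmveth_buff_pool
 * definitions from ibmveth.h above are in scope.  Locking and input
 * validation are omitted.
 */
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static struct attribute veth_active_attr = { .name = "active", .mode = 0644 };
static struct attribute veth_num_attr    = { .name = "num",    .mode = 0644 };
static struct attribute veth_size_attr   = { .name = "size",   .mode = 0644 };

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool =
		container_of(kobj, struct ibmveth_buff_pool, kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	if (attr == &veth_num_attr)
		return sprintf(buf, "%u\n", pool->size);      /* buffers per pool */
	if (attr == &veth_size_attr)
		return sprintf(buf, "%u\n", pool->buff_size); /* bytes per buffer */
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	/* parse buf, range-check against IBMVETH_MAX_POOL_COUNT /
	 * IBMVETH_MAX_BUF_SIZE, and reallocate the pool as needed (omitted) */
	return count;
}

static void veth_pool_release(struct kobject *kobj)
{
	/* pools are embedded in the adapter structure; nothing to free */
}

static struct sysfs_ops veth_pool_ops = {
	.show  = veth_pool_show,
	.store = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release       = veth_pool_release,
	.sysfs_ops     = &veth_pool_ops,
	.default_attrs = veth_pool_attrs,
};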

Comments and suggestions are welcome...
-- 
Santiago A. Leon
Power Linux Development
IBM Linux Technology Center
--- a/drivers/net/ibmveth.h	2006-01-02 21:21:10.0 -0600
+++ b/drivers/net/ibmveth.h	2006-04-18 10:20:00.102520432 -0500
@@ -75,10 +75,13 @@
 
 #define IbmVethNumBufferPools 5
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
-/* pool_size should be sorted */
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
 dma_addr_t *dma_addr;
 struct sk_buff **skbuff;
 int active;
+struct kobject kobj;
 };
 
 struct ibmveth_rx_q {
@@ -118,6 +122,7 @@ struct ibmveth_adapter {
 dma_addr_t filter_list_dma;
 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
 struct ibmveth_rx_q rx_queue;
+int pool_config;
 
 /* adapter specific stats */
 u64 replenish_task_cycles;
--- a/drivers/net/ibmveth.c	2006-01-02 21:21:10.0 -0600
+++ b/drivers/net/ibmveth.c	2006-04-18 10:19:55.624532480 -0500
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapte
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_leng
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
 	pool->size = pool_size;
 	pool->index = pool_index;
 	pool->buff_size = buff_size;
 	pool->threshold = pool_size / 2;
+	pool->active = pool_active;
 }
 
 /* allocate and setup an buffer pool - called during open */
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(str
 	atomic_set(&pool->available, 0);
 	pool->producer_index = 0;
 	pool->consumer_index = 0;
-	pool->active = 0;
 
 	return 0;
 }
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(str
 		kfree(pool->skbuff);
 		pool->skbuff = NULL;
 	}
-	pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmve
 	}
 
 	for(i = 0; i<IbmVethNumBufferPools; i++)
-		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter, 
+						 &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_devic
 	adapter->rx_queue.num_slots = rxq_entries;
 	adapter->rx_queue.toggle = 1;
 
-	/* call change_mtu to init the buffer pools based in initial mtu */
-	ibmveth_change_mtu(netdev, netdev->mtu);
-
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
@@ -522,6 +521,17 @@ static int ibmveth_open(struct net_devic
 		return -ENONET; 
 	}
 
+	for(i = 0; i<IbmVethNumBufferPools; i++) {
+		if(!adapter->rx_buff_pool[i].active)
+			continue;
+		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+			ibmveth_error_printk("unable to alloc pool\n");
+			adapter->rx_buff_pool[i].active = 0;
+			ibmveth_cleanup(adapter);
+			return -ENOMEM ;
+		}
+	}
+
 	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
 	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
 		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
@@ -550,7 +560,8 @@ static int ibmveth_clos