In order to reduce the interrupt times in the embedded system,
a receiving workqueue is introduced.
This modification also enhances the overall throughput as a
benefit of reduced interrupt occurrence.
Signed-off-by: Clanlab (Taiwan) Linux Project clanlab.p...@gmail.com
Cc: David Brownell dbrown...@users.sourceforge.net
Cc: David S. Miller da...@davemloft.net
Cc: Stephen Hemminger shemmin...@vyatta.com
Cc: Felipe Balbi ba...@ti.com
Cc: Greg Kroah-Hartman gre...@linuxfoundation.org
Cc: Oliver Neukum oli...@neukum.org
---
drivers/usb/gadget/u_ether.c | 111 +--
1 file changed, 76 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index b7d4f82..a96a8d9 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -72,6 +72,7 @@ struct eth_dev {
struct sk_buff_head *list);
struct work_struct work;
+ struct work_struct rx_work;
unsigned long todo;
#define	WORK_RX_MEMORY	0
@@ -81,6 +82,8 @@ struct eth_dev {
u8 dev_mac[ETH_ALEN];
};
+static struct workqueue_struct *gether_wq;
+
/*-------------------------------------------------------------------------*/
#define RX_EXTRA 20 /* bytes guarding against rx overflows */
@@ -253,18 +256,16 @@ enomem:
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
- spin_lock_irqsave(&dev->req_lock, flags);
- list_add(&req->list, &dev->rx_reqs);
- spin_unlock_irqrestore(&dev->req_lock, flags);
}
return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = req->context, *skb2;
+ struct sk_buff *skb = req->context;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
+ bool rx_queue = 0;
switch (status) {
@@ -288,30 +289,8 @@ static void rx_complete(struct usb_ep *ep, struct
usb_request *req)
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
- skb = NULL;
-
- skb2 = skb_dequeue(&dev->rx_frames);
- while (skb2) {
- if (status < 0
- || ETH_HLEN > skb2->len
- || skb2->len > VLAN_ETH_FRAME_LEN) {
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- DBG(dev, "rx length %d\n", skb2->len);
- dev_kfree_skb_any(skb2);
- goto next_frame;
- }
- skb2->protocol = eth_type_trans(skb2, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb2->len;
-
- /* no buffer copies needed, unless hardware can't
-* use skb buffers.
-*/
- status = netif_rx(skb2);
-next_frame:
- skb2 = skb_dequeue(&dev->rx_frames);
- }
+ if (!status)
+ rx_queue = 1;
break;
/* software-driven interface shutdown */
@@ -334,22 +313,20 @@ quiesce:
/* FALLTHROUGH */
default:
+ rx_queue = 1;
+ dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}
- if (skb)
- dev_kfree_skb_any(skb);
- if (!netif_running(dev->net)) {
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);
- req = NULL;
- }
- if (req)
- rx_submit(dev, req, GFP_ATOMIC);
+
+ if (rx_queue)
+ queue_work(gether_wq, &dev->rx_work);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -414,16 +391,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
+ int rx_counts = 0;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty(&dev->rx_reqs)) {
+
+ if (++rx_counts > qlen(dev->gadget, dev->qmult))
+ break;
+
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(req-list);
spin_unlock_irqrestore(&dev->req_lock, flags);
if (rx_submit(dev, req, gfp_flags)