Hi!
I am accessing USB devices through the usbdevfs filesystem. I have to do
a lot of big bulk transfers in a very short time frame, and stumbled
upon the limitation that each IOCTL_USB_BULK ioctl can only trigger a
single USB BULK transfer.
For example, in order to do a 64k BULK OUT transfer, assuming an
endpoint maxPacketSize of 32 bytes, I had to do 2048 ioctls, each one
triggering a context switch.
In the FreeBSD USB device FS, a single read/write can trigger multiple
bulk transfers, which leads to acceptable performance.
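To make the overhead concrete, this is roughly what the userland loop
looks like today (only a sketch; the device path, endpoint number and
packet size are invented for the example):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

#define EP_OUT     0x02   /* hypothetical BULK OUT endpoint */
#define MAX_PACKET 32     /* assumed endpoint maxPacketSize */

/* one USBDEVFS_BULK ioctl (and context switch) per 32-byte chunk */
static int bulk_out_chunked(int fd, unsigned char *buf, unsigned int len)
{
    struct usbdevfs_bulktransfer bt;
    unsigned int done = 0;
    int ret;

    while (done < len) {
        bt.ep = EP_OUT;
        bt.len = (len - done > MAX_PACKET) ? MAX_PACKET : len - done;
        bt.timeout = 1000;          /* milliseconds */
        bt.data = buf + done;
        ret = ioctl(fd, USBDEVFS_BULK, &bt);
        if (ret < 0)
            return ret;
        done += ret;
    }
    return done;
}

int main(void)
{
    unsigned char buf[65536];
    int fd, n;

    memset(buf, 0xa5, sizeof(buf));
    fd = open("/proc/bus/usb/001/002", O_RDWR);   /* path is only an example */
    if (fd < 0)
        return 1;
    n = bulk_out_chunked(fd, buf, sizeof(buf));   /* 65536 / 32 = 2048 ioctls */
    printf("wrote %d bytes\n", n);
    close(fd);
    return 0;
}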
I have patched the file drivers/usb/devio.c to modify the proc_bulk
procedure so that a usbdevfs_bulktransfer request gets cut up into
multiple USB BULK transfers, each at most as big as the endpoint's
maximum packet size and never bigger than PAGE_SIZE (as I still use
__get_free_page for the kernel buffer). The BULK transfers are then
carried out one after the other. The transfer is stopped as soon as a
BULK transfer completes short of its full size (for example, only 28
bytes are fetched for an IN request of 32 bytes).
This way, I was able to tremendously improve the transfer speed.
Furthermore, no processing of the endpoint maxPacketSize is necessary in
userland anymore.
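From userland this then looks roughly as follows (again only a sketch,
with the same invented endpoint as above):

/* With the patch, the whole 64k buffer can be handed over in one ioctl;
   splitting into maxPacketSize-sized BULK transfers happens in the kernel. */
static int bulk_out_single(int fd, unsigned char *buf, unsigned int len)
{
    struct usbdevfs_bulktransfer bt;

    bt.ep = 0x02;           /* hypothetical BULK OUT endpoint */
    bt.len = len;           /* no longer limited to PAGE_SIZE */
    bt.timeout = 1000;      /* milliseconds, used for each sub-transfer */
    bt.data = buf;

    /* returns the number of bytes actually transferred, or an error
       if one of the sub-transfers failed */
    return ioctl(fd, USBDEVFS_BULK, &bt);
}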
The patch is against a 2.4.26 kernel and does not apply to a 2.6
kernel, but should be no problem to adapt. The changes themselves are
pretty trivial: proc_bulk now makes use of proc_bulk_read and
proc_bulk_write, which split the original request into multiple smaller
requests, copying the read/written data appropriately to/from userland.
Manuel Odendahl
--- devio-old.c 2003-11-28 19:26:20.000000000 +0100
+++ devio.c 2004-10-07 10:03:19.000000000 +0200
@@ -19,7 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * $Id: devio.c,v 1.7 2000/02/01 17:28:48 fliegl Exp $
+ * $Id: devio.c 827 2004-10-07 07:55:17Z manuel $
*
* This file implements the usbdevfs/x/y files, where
* x is the bus number and y the device number.
@@ -590,14 +590,97 @@
return i;
}
+/* Read bulk.len bytes using bulk read transfers, limited by the max
+ packet size on the pipe and the PAGE_SIZE */
+static int proc_bulk_read(struct usb_device *dev, struct usbdevfs_bulktransfer *bulk, int pipe, int max_size)
+{
+ int read, len2;
+ unsigned int tmo;
+ unsigned char *tbuf;
+
+ if (bulk->len <= 0)
+ return -EINVAL;
+
+ if (!(tbuf = (unsigned char *)__get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ tmo = (bulk->timeout * HZ + 999) / 1000;
+ for (read = 0; read < bulk->len; ) {
+ int bulk_size, i;
+
+ bulk_size = max_size > (bulk->len - read) ? (bulk->len - read) : max_size;
+ i = usb_bulk_msg(dev, pipe, tbuf, bulk_size, &len2, tmo);
+ if (i < 0) {
+ printk(KERN_WARNING "usbdevfs: USBDEVFS_BULK failed dev %d ep 0x%x len %u ret %d\n",
+ dev->devnum, bulk->ep, bulk_size, i);
+ len2 = i;
+ goto exit;
+ }
+ if (len2 > 0) {
+ if (copy_to_user(bulk->data + read, tbuf, len2)) {
+ len2 = -EFAULT;
+ goto exit;
+ }
+ }
+ read += len2;
+ /* We have read less than we wanted, the device has probably emptied its buffer. */
+ if (len2 < bulk_size)
+ break;
+ }
+ len2 = read;
+
+ exit:
+ free_page((unsigned long)tbuf);
+ return len2;
+}
+
+static int proc_bulk_write(struct usb_device *dev, struct usbdevfs_bulktransfer *bulk, int pipe, int max_size)
+{
+ int written, len2;
+ unsigned int tmo;
+ unsigned char *tbuf;
+
+ if (bulk->len <= 0)
+ return -EINVAL;
+
+ if (!(tbuf = (unsigned char *)__get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ tmo = (bulk->timeout * HZ + 999) / 1000;
+ for (written = 0; written < bulk->len; ) {
+ int i, bulk_size;
+
+ bulk_size = max_size > (bulk->len - written) ? (bulk->len - written) : max_size;
+ if (copy_from_user(tbuf, bulk->data + written, bulk_size)) {
+ len2 = -EFAULT;
+ goto exit;
+ }
+ i = usb_bulk_msg(dev, pipe, tbuf, bulk_size, &len2, tmo);
+ if (i < 0) {
+ printk(KERN_WARNING "usbdevfs: USBDEVFS_BULK failed dev %d ep 0x%x len %u ret %d\n",
+ dev->devnum, bulk->ep, bulk_size, i);
+ len2 = i;
+ goto exit;
+ }
+ written += len2;
+ /* We have written fewer bytes than we wanted, the device has probably filled its buffer. */
+ if (len2 < bulk_size)
+ break;
+ }
+ len2 = written;
+
+ exit:
+ free_page((unsigned long)tbuf);
+ return len2;
+}
+
static int proc_bulk(struct dev_state *ps, void *arg)
{
struct usb_device *dev = ps->dev;
struct usbdevfs_bulktransfer bulk;
- unsigned int tmo, len1, pipe;
- int len2;
- unsigned char *tbuf;
- int i, ret;
+ unsigned int len1, pipe;
+ int ret;
+ int max_size;
if (copy_from_user(&bulk, (void *)arg, sizeof(bulk)))
return -EFAULT;
@@ -605,46 +688,24 @@
return ret;
if ((ret = checkintf(ps, ret)))
return ret;
+
if (bulk.ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(dev, bulk.ep & 0x7f);
else
pipe = usb_sndbulkpipe(dev, bulk.ep & 0x7f);
- if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
+ if (!(max_size = usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))))
return -EINVAL;
+ if (max_size > PAGE_SIZE)
+ max_size = PAGE_SIZE;
len1 = bulk.len;
- if (len1 > PAGE_SIZE)
- return -EINVAL;
- if (!(tbuf = (unsigned char *)__get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- tmo = (bulk.timeout * HZ + 999) / 1000;
if (bulk.ep & 0x80) {
if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) {
- free_page((unsigned long)tbuf);
return -EINVAL;
}
- i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
- if (!i && len2) {
- if (copy_to_user(bulk.data, tbuf, len2)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
- }
- }
+ return proc_bulk_read(dev, &bulk, pipe, max_size);
} else {
- if (len1) {
- if (copy_from_user(tbuf, bulk.data, len1)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
- }
- }
- i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
+ return proc_bulk_write(dev, &bulk, pipe, max_size);
}
- free_page((unsigned long)tbuf);
- if (i < 0) {
- printk(KERN_WARNING "usbdevfs: USBDEVFS_BULK failed dev %d ep 0x%x len %u ret %d\n",
- dev->devnum, bulk.ep, bulk.len, i);
- return i;
- }
- return len2;
}
static int proc_resetep(struct dev_state *ps, void *arg)