Module Name:    src
Committed By:   ad
Date:           Mon May 18 21:12:33 UTC 2009

Modified Files:
        src/sys/kern: kern_physio.c

Log Message:
- Remove unneeded uvm_lwp_hold(), uvm_lwp_rele().
- Make physio_concurrency tuneable via crash(8).
- Update comments.


To generate a diff of this commit:
cvs rdiff -u -r1.89 -r1.90 src/sys/kern/kern_physio.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_physio.c
diff -u src/sys/kern/kern_physio.c:1.89 src/sys/kern/kern_physio.c:1.90
--- src/sys/kern/kern_physio.c:1.89	Sun Nov  9 12:18:07 2008
+++ src/sys/kern/kern_physio.c	Mon May 18 21:12:33 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_physio.c,v 1.89 2008/11/09 12:18:07 bouyer Exp $	*/
+/*	$NetBSD: kern_physio.c,v 1.90 2009/05/18 21:12:33 ad Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986, 1990, 1993
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_physio.c,v 1.89 2008/11/09 12:18:07 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_physio.c,v 1.90 2009/05/18 21:12:33 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -86,12 +86,7 @@
 ONCE_DECL(physio_initialized);
 struct workqueue *physio_workqueue;
 
-/*
- * The routines implemented in this file are described in:
- *	Leffler, et al.: The Design and Implementation of the 4.3BSD
- *	    UNIX Operating System (Addison Welley, 1989)
- * on pages 231-233.
- */
+int physio_concurrency = 16;
 
 /* #define	PHYSIO_DEBUG */
 #if defined(PHYSIO_DEBUG)
@@ -208,13 +203,9 @@
 	return error;
 }
 
-#define	PHYSIO_CONCURRENCY	16	/* XXX tune */
-
 /*
  * Do "physical I/O" on behalf of a user.  "Physical I/O" is I/O directly
  * from the raw device to user buffers, and bypasses the buffer cache.
- *
- * Comments in brackets are from Leffler, et al.'s pseudo-code implementation.
  */
 int
 physio(void (*strategy)(struct buf *), struct buf *obp, dev_t dev, int flags,
@@ -226,7 +217,7 @@
 	int i, error;
 	struct buf *bp = NULL;
 	struct physio_stat *ps;
-	int concurrency = PHYSIO_CONCURRENCY - 1;
+	int concurrency = physio_concurrency - 1;
 
 	error = RUN_ONCE(&physio_initialized, physio_init);
 	if (__predict_false(error != 0)) {
@@ -250,7 +241,6 @@
 
 	/* Make sure we have a buffer, creating one if necessary. */
 	if (obp != NULL) {
-		/* [raise the processor priority level to splbio;] */
 		mutex_enter(&bufcache_lock);
 		/* Mark it busy, so nobody else will use it. */
 		while (bbusy(obp, false, 0, NULL) == EPASSTHROUGH)
@@ -259,8 +249,6 @@
 		concurrency = 0; /* see "XXXkludge" comment below */
 	}
 
-	uvm_lwp_hold(l);
-
 	for (i = 0; i < uio->uio_iovcnt; i++) {
 		bool sync = true;
 
@@ -290,18 +278,17 @@
 			bp->b_private = ps;
 
 			/*
-			 * [mark the buffer busy for physical I/O]
-			 * (i.e. set B_PHYS (because it's an I/O to user
-			 * memory, and B_RAW, because B_RAW is to be
-			 * "Set by physio for raw transfers.", in addition
-			 * to the "busy" and read/write flag.)
+			 * Mark the buffer busy for physical I/O.  Also set
+			 * B_PHYS because it's an I/O to user memory, and
+			 * B_RAW because B_RAW is to be "set by physio for
+			 * raw transfers".
 			 */
 			bp->b_oflags = 0;
 			bp->b_cflags = BC_BUSY;
 			bp->b_flags = flags | B_PHYS | B_RAW;
 			bp->b_iodone = physio_biodone;
 
-			/* [set up the buffer for a maximum-sized transfer] */
+			/* Set up the buffer for a maximum-sized transfer. */
 			bp->b_blkno = btodb(uio->uio_offset);
 			if (dbtob(bp->b_blkno) != uio->uio_offset) {
 				error = EINVAL;
@@ -311,7 +298,7 @@
 			bp->b_data = iovp->iov_base;
 
 			/*
-			 * [call minphys to bound the transfer size]
+			 * Call minphys to bound the transfer size,
 			 * and remember the amount of data to transfer,
 			 * for later comparison.
 			 */
@@ -327,18 +314,17 @@
 			endp = (vaddr_t)bp->b_data + todo;
 			if (trunc_page(endp) != endp) {
 				/*
-				 * following requests can overlap.
+				 * Following requests can overlap.
 				 * note that uvm_vslock does round_page.
 				 */
 				sync = true;
 			}
 
 			/*
-			 * [lock the part of the user address space involved
-			 *    in the transfer]
-			 * Beware vmapbuf(); it clobbers b_data and
-			 * saves it in b_saveaddr.  However, vunmapbuf()
-			 * restores it.
+			 * Lock the part of the user address space involved
+			 * in the transfer.  Beware vmapbuf(); it clobbers
+			 * b_data and saves it in b_saveaddr.  However,
+			 * vunmapbuf() restores it.
 			 */
 			error = uvm_vslock(p->p_vmspace, bp->b_data, todo,
 			    (flags & B_READ) ?  VM_PROT_WRITE : VM_PROT_READ);
@@ -353,7 +339,7 @@
 			ps->ps_running++;
 			mutex_exit(&ps->ps_lock);
 
-			/* [call strategy to start the transfer] */
+			/* Call strategy to start the transfer. */
 			(*strategy)(bp);
 			bp = NULL;
 
@@ -391,16 +377,16 @@
 	kmem_free(ps, sizeof(*ps));
 
 	/*
-	 * [clean up the state of the buffer]
-	 * Remember if somebody wants it, so we can wake them up below.
-	 * Also, if we had to steal it, give it back.
+	 * Clean up the state of the buffer.  Remember if somebody wants
+	 * it, so we can wake them up below.  Also, if we had to steal it,
+	 * give it back.
 	 */
 	if (obp != NULL) {
 		KASSERT((obp->b_cflags & BC_BUSY) != 0);
 
 		/*
-		 * [if another process is waiting for the raw I/O buffer,
-		 *    wake up processes waiting to do physical I/O;
+		 * If another process is waiting for the raw I/O buffer,
+		 * wake up processes waiting to do physical I/O;
 		 */
 		mutex_enter(&bufcache_lock);
 		obp->b_cflags &= ~(BC_BUSY | BC_WANTED);
@@ -409,7 +395,6 @@
 		cv_broadcast(&obp->b_busy);
 		mutex_exit(&bufcache_lock);
 	}
-	uvm_lwp_rele(l);
 
 	DPRINTF(("%s: done: off=%" PRIu64 ", resid=%zu\n",
 	    __func__, uio->uio_offset, uio->uio_resid));
@@ -418,13 +403,12 @@
 }
 
 /*
- * Leffler, et al., says on p. 231:
- * "The minphys() routine is called by physio() to adjust the
- * size of each I/O transfer before the latter is passed to
- * the strategy routine..."
+ * A minphys() routine is called by physio() to adjust the size of each
+ * I/O transfer before the latter is passed to the strategy routine.
  *
- * so, just adjust the buffer's count accounting to MAXPHYS here,
- * and return the new count;
+ * This minphys() is a default that must be called to enforce limits
+ * that are applicable to all devices, because of limitations in the
+ * kernel or the hardware platform.
  */
 void
 minphys(struct buf *bp)

Reply via email to