I am enclosing patches against linux-2.3.35 - these should
correct a couple of issues that people have reported.
1) I *think* I have finally fixed the DMA exhaustion warning
message problem. The cause is that scsi_malloc can only return bounce
buffer segments of <= PAGE_SIZE, and I had forgotten this little detail. I
am toying with the idea of scaling this pool down to a small emergency
allocation and relying upon kmalloc(size, GFP_DMA) for the bulk of the
work, but now is not the time to try anything like that. The fix was to
scsi_merge.c, and was a little ugly. People with hosts that do ISA DMA
would see a little more overhead in doing the extra computations, but
ultimately it needs to be done right and if people want high performance
they shouldn't be using ISA bus host adapters. The thing that scares me
about these changes is that they worked the first time :-). I will be
testing them a bit more myself.
2) I have corrected an issue where the ppa driver would hang up if
you inserted the module without media in the drive and then later inserted
media.
3) I fixed the bug I mentioned in the earlier message, where a
single SCpnt was getting queued to the low-level driver in some instances
when there are bad sectors.
4) I fixed a secondary bug that comes up when a device is taken
offline where the scatter-gather table and bounce buffers were not being
deallocated, and the DMA pool is exhausted.
5) I have cleaned out a bit more dead code that I have identified,
and updated some stale comments here and there.
There is something else I am still looking at - a remnant of the
bugs I discovered when looking at error handling. It seems that in some
cases when you get MEDIUM_ERROR it launches into the error handler thread,
and ultimately takes the thing offline. Not the correct behavior - it
should just report the thing up to the higher levels.
Taking devices offline still isn't optimal. It breaks the cycle
of infinite attempts to remedy the error (which is good), but the device
is unusable, unmountable, etc. I think I need to play with
scsi_debug and fix it to allow just enough access so that you can unmount
the sucker.
-Eric
"The world was a library, and its books were the stones, leaves,
brooks, grass, and the birds of the earth. We learned to do what only
a student of nature ever learns, and that was to feel beauty."
Chief Luther Standing Bear - Teton Sioux
--- linux/CREDITS.~1~ Sun Dec 26 22:34:04 1999
+++ linux/CREDITS Sat Jan 1 21:42:50 2000
@@ -2398,7 +2398,7 @@
S: USA
N: Eric Youngdale
-E: [EMAIL PROTECTED]
+E: [EMAIL PROTECTED]
D: General kernel hacker
D: SCSI iso9660 and ELF
S: 17 Canterbury Square #101
Index: linux/drivers/scsi/scsi.c
diff -c linux/drivers/scsi/scsi.c:1.1.1.5 linux/drivers/scsi/scsi.c:1.7
*** linux/drivers/scsi/scsi.c:1.1.1.5 Thu Dec 30 00:13:59 1999
--- linux/drivers/scsi/scsi.c Sun Jan 2 23:45:05 2000
***************
*** 1,6 ****
/*
* scsi.c Copyright (C) 1992 Drew Eckhardt
! * Copyright (C) 1993, 1994, 1995 Eric Youngdale
*
* generic mid-level SCSI driver
* Initial versions: Drew Eckhardt
--- 1,6 ----
/*
* scsi.c Copyright (C) 1992 Drew Eckhardt
! * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
*
* generic mid-level SCSI driver
* Initial versions: Drew Eckhardt
***************
*** 13,19 ****
* Tommy Thorn <tthorn>
* Thomas Wuensche <[EMAIL PROTECTED]>
*
! * Modified by Eric Youngdale [EMAIL PROTECTED] or [EMAIL PROTECTED] to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
--- 13,19 ----
* Tommy Thorn <tthorn>
* Thomas Wuensche <[EMAIL PROTECTED]>
*
! * Modified by Eric Youngdale [EMAIL PROTECTED] or [EMAIL PROTECTED] to
* add scatter-gather, multiple outstanding request, and other
* enhancements.
*
***************
*** 153,161 ****
*/
unsigned int scsi_logging_level = 0;
- volatile struct Scsi_Host *host_active = NULL;
-
-
const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
{
"Direct-Access ",
--- 153,158 ----
***************
*** 193,206 ****
extern void scsi_old_done(Scsi_Cmnd * SCpnt);
extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
- #define SCSI_BLOCK(DEVICE, HOST) \
- ((HOST->block && host_active && HOST != host_active) \
- || ((HOST)->can_queue && HOST->host_busy >= HOST->can_queue) \
- || ((HOST)->host_blocked) \
- || ((DEVICE) != NULL && (DEVICE)->device_blocked) )
-
-
-
struct dev_info {
const char *vendor;
const char *model;
--- 190,195 ----
***************
*** 496,501 ****
--- 485,491 ----
* things are quiet.
*/
atomic_inc(&shpnt->host_active);
+ atomic_inc(&SDpnt->device_active);
if (hardcoded == 1) {
Scsi_Device *oldSDpnt = SDpnt;
***************
*** 577,582 ****
--- 567,573 ----
* so we know when everything is quiet.
*/
atomic_dec(&shpnt->host_active);
+ atomic_dec(&SDpnt->device_active);
leave:
***************
*** 1077,1093 ****
|| SDpnt->id != device->id
|| SDpnt == device) {
continue;
- }
- for (SCpnt = SDpnt->device_queue;
- SCpnt;
- SCpnt = SCpnt->next) {
- if (SCpnt->request.rq_status !=
RQ_INACTIVE) {
- break;
- }
- }
- if (SCpnt) {
- break;
}
}
if (SDpnt) {
/*
--- 1068,1078 ----
|| SDpnt->id != device->id
|| SDpnt == device) {
continue;
}
+ if( atomic_read(&SDpnt->device_active) != 0)
+ {
+ break;
+ }
}
if (SDpnt) {
/*
***************
*** 1173,1178 ****
--- 1158,1164 ----
SCpnt->request.sem = NULL; /* And no one is waiting for this
* to complete */
atomic_inc(&SCpnt->host->host_active);
+ atomic_inc(&SCpnt->device->device_active);
SCpnt->buffer = NULL;
SCpnt->bufflen = 0;
***************
*** 1184,1189 ****
--- 1170,1176 ----
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
+ SCpnt->result = 0;
SCpnt->underflow = 0; /* Do not flag underflow conditions */
SCpnt->resid = 0;
SCpnt->state = SCSI_STATE_INITIALIZING;
***************
*** 1225,1234 ****
--- 1212,1224 ----
spin_lock_irqsave(&device_request_lock, flags);
+ SDpnt = SCpnt->device;
+
SCpnt->request.rq_status = RQ_INACTIVE;
SCpnt->state = SCSI_STATE_UNUSED;
SCpnt->owner = SCSI_OWNER_NOBODY;
atomic_dec(&SCpnt->host->host_active);
+ atomic_dec(&SDpnt->device_active);
SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d,
failed=%d)\n",
SCpnt->target,
***************
*** 1251,1258 ****
up(SCpnt->host->eh_wait);
}
- SDpnt = SCpnt->device;
-
spin_unlock_irqrestore(&device_request_lock, flags);
/*
--- 1241,1246 ----
***************
*** 1367,1372 ****
--- 1355,1361 ----
if (rtn != 0) {
scsi_delete_timer(SCpnt);
scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
+ SCSI_LOG_MLQUEUE(3, printk("queuecommand : request
+rejected\n"));
}
} else {
spin_lock_irqsave(&io_request_lock, flags);
***************
*** 1438,1444 ****
int timeout, int retries)
{
struct Scsi_Host *host = SCpnt->host;
- Scsi_Device *device = SCpnt->device;
ASSERT_LOCK(&io_request_lock, 0);
--- 1427,1432 ----
***************
*** 1693,1698 ****
--- 1681,1688 ----
* from being sent to the device, so we shouldn't end
up
* with tons of things being sent down that shouldn't
be.
*/
+ SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as
+device queue full, put on ml queue %p\n",
+ SCpnt));
scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
break;
default:
Index: linux/drivers/scsi/scsi.h
diff -c linux/drivers/scsi/scsi.h:1.1.1.4 linux/drivers/scsi/scsi.h:1.4
*** linux/drivers/scsi/scsi.h:1.1.1.4 Wed Dec 22 10:28:44 1999
--- linux/drivers/scsi/scsi.h Sun Jan 2 23:45:05 2000
***************
*** 430,437 ****
extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int, int);
- extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *);
-
extern void scsi_release_command(Scsi_Cmnd *);
extern int max_scsi_hosts;
--- 430,435 ----
***************
*** 461,466 ****
--- 459,465 ----
device is busy */
struct Scsi_Host *host;
request_queue_t request_queue;
+ atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on
low-level */
int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize
new request */
***************
*** 687,692 ****
--- 686,692 ----
extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
int block_sectors);
+ extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *);
#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
#include "hosts.h"
Index: linux/drivers/scsi/scsi_error.c
diff -c linux/drivers/scsi/scsi_error.c:1.1.1.5 linux/drivers/scsi/scsi_error.c:1.5
*** linux/drivers/scsi/scsi_error.c:1.1.1.5 Thu Dec 30 00:14:00 1999
--- linux/drivers/scsi/scsi_error.c Sun Jan 2 23:45:05 2000
***************
*** 150,156 ****
rtn = del_timer(&SCset->eh_timeout);
! SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p\n", SCset));
SCset->eh_timeout.data = (unsigned long) NULL;
SCset->eh_timeout.function = NULL;
--- 150,156 ----
rtn = del_timer(&SCset->eh_timeout);
! SCSI_LOG_ERROR_RECOVERY(5, printk("Clearing timer for command %p %d\n", SCset,
rtn));
SCset->eh_timeout.data = (unsigned long) NULL;
SCset->eh_timeout.function = NULL;
Index: linux/drivers/scsi/scsi_ioctl.c
diff -c linux/drivers/scsi/scsi_ioctl.c:1.1.1.3 linux/drivers/scsi/scsi_ioctl.c:1.3
*** linux/drivers/scsi/scsi_ioctl.c:1.1.1.3 Wed Dec 22 10:28:44 1999
--- linux/drivers/scsi/scsi_ioctl.c Sat Jan 1 21:39:23 2000
***************
*** 117,129 ****
return -EINTR;
}
! {
! DECLARE_MUTEX_LOCKED(sem);
! SCpnt->request.sem = &sem;
! scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
! down(&sem);
! SCpnt->request.sem = NULL;
! }
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", SCpnt->result));
--- 117,123 ----
return -EINTR;
}
! scsi_wait_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", SCpnt->result));
***************
*** 306,319 ****
return -EINTR;
}
! {
! DECLARE_MUTEX_LOCKED(sem);
! SCpnt->request.sem = &sem;
! scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
! timeout, retries);
! down(&sem);
! SCpnt->request.sem = NULL;
! }
/*
* If there was an error condition, pass the info back to the user.
--- 300,307 ----
return -EINTR;
}
! scsi_wait_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
! timeout, retries);
/*
* If there was an error condition, pass the info back to the user.
Index: linux/drivers/scsi/scsi_lib.c
diff -c linux/drivers/scsi/scsi_lib.c:1.1.1.4 linux/drivers/scsi/scsi_lib.c:1.6
*** linux/drivers/scsi/scsi_lib.c:1.1.1.4 Thu Dec 30 00:14:01 1999
--- linux/drivers/scsi/scsi_lib.c Sun Jan 2 23:45:05 2000
***************
*** 294,304 ****
* Function: scsi_end_request()
*
* Purpose: Post-processing of completed commands called from interrupt
! * handler.
*
* Arguments: SCpnt - command that is complete.
* uptodate - 1 if I/O indicates success, 0 for I/O error.
* sectors - number of sectors we want to mark.
*
* Lock status: Assumed that lock is not held upon entry.
*
--- 294,305 ----
* Function: scsi_end_request()
*
* Purpose: Post-processing of completed commands called from interrupt
! * handler or a bottom-half handler.
*
* Arguments: SCpnt - command that is complete.
* uptodate - 1 if I/O indicates success, 0 for I/O error.
* sectors - number of sectors we want to mark.
+ * requeue - indicates whether we should requeue leftovers.
*
* Lock status: Assumed that lock is not held upon entry.
*
***************
*** 310,316 ****
* We are guaranteeing that the request queue will be goosed
* at some point during this call.
*/
! Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
{
struct request *req;
struct buffer_head *bh;
--- 311,320 ----
* We are guaranteeing that the request queue will be goosed
* at some point during this call.
*/
! static Scsi_Cmnd *__scsi_end_request(Scsi_Cmnd * SCpnt,
! int uptodate,
! int sectors,
! int requeue)
{
struct request *req;
struct buffer_head *bh;
***************
*** 348,353 ****
--- 352,362 ----
if (req->bh) {
request_queue_t *q;
+ if( !requeue )
+ {
+ return SCpnt;
+ }
+
q = &SCpnt->device->request_queue;
req->buffer = bh->b_data;
***************
*** 377,382 ****
--- 386,470 ----
}
/*
+ * Function: scsi_end_request()
+ *
+ * Purpose: Post-processing of completed commands called from interrupt
+ * handler or a bottom-half handler.
+ *
+ * Arguments: SCpnt - command that is complete.
+ * uptodate - 1 if I/O indicates success, 0 for I/O error.
+ * sectors - number of sectors we want to mark.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: This is called for block device requests in order to
+ * mark some number of sectors as complete.
+ *
+ * We are guaranteeing that the request queue will be goosed
+ * at some point during this call.
+ */
+ Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+ {
+ return __scsi_end_request(SCpnt, uptodate, sectors, 1);
+ }
+
+ /*
+ * Function: scsi_release_buffers()
+ *
+ * Purpose: Completion processing for block device I/O requests.
+ *
+ * Arguments: SCpnt - command that we are bailing.
+ *
+ * Lock status: Assumed that no lock is held upon entry.
+ *
+ * Returns: Nothing
+ *
+ * Notes: In the event that an upper level driver rejects a
+ * command, we must release resources allocated during
+ * the __init_io() function. Primarily this would involve
+ * the scatter-gather table, and potentially any bounce
+ * buffers.
+ */
+ static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
+ {
+ ASSERT_LOCK(&io_request_lock, 0);
+
+ /*
+ * Free up any indirection buffers we allocated for DMA purposes.
+ * For the case of a READ, we need to copy the data out of the
+ * bounce buffer and into the real buffer.
+ */
+ if (SCpnt->use_sg) {
+ struct scatterlist *sgpnt;
+ int i;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (i = 0; i < SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
+ } else {
+ if (SCpnt->request_buffer != SCpnt->request.buffer) {
+ scsi_free(SCpnt->request_buffer, SCpnt->request_bufflen);
+ }
+ }
+
+ /*
+ * Zero these out. They now point to freed memory, and it is
+ * dangerous to hang onto the pointers.
+ */
+ SCpnt->buffer = NULL;
+ SCpnt->bufflen = 0;
+ SCpnt->request_buffer = NULL;
+ SCpnt->request_bufflen = 0;
+ }
+
+ /*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
***************
*** 471,484 ****
* If multiple sectors are requested in one buffer, then
* they will have been finished off by the first command.
* If not, then we have a multi-buffer command.
! */
! SCpnt = scsi_end_request(SCpnt, 1, good_sectors);
/*
* If the command completed without error, then either finish off the
* rest of the command, or start a new one.
*/
! if (result == 0) {
return;
}
}
--- 559,581 ----
* If multiple sectors are requested in one buffer, then
* they will have been finished off by the first command.
* If not, then we have a multi-buffer command.
! *
! * If block_sectors != 0, it means we had a medium error
! * of some sort, and that we want to mark some number of
! * sectors as not uptodate. Thus we want to inhibit
! * requeueing right here - we will requeue down below
! * when we handle the bad sectors.
! */
! SCpnt = __scsi_end_request(SCpnt,
! 1,
! good_sectors,
! result == 0);
/*
* If the command completed without error, then either finish off the
* rest of the command, or start a new one.
*/
! if (result == 0 || SCpnt == NULL ) {
return;
}
}
***************
*** 561,567 ****
}
} /* driver byte != 0 */
if (result) {
! printk("SCSI disk error : host %d channel %d id %d lun %d return code
= %x\n",
SCpnt->device->host->host_no,
SCpnt->device->channel,
SCpnt->device->id,
--- 658,668 ----
}
} /* driver byte != 0 */
if (result) {
! struct Scsi_Device_Template *STpnt;
!
! STpnt = scsi_get_request_dev(&SCpnt->request);
! printk("SCSI %s error : host %d channel %d id %d lun %d return code =
%x\n",
! (STpnt ? STpnt->name : "device"),
SCpnt->device->host->host_no,
SCpnt->device->channel,
SCpnt->device->id,
***************
*** 569,574 ****
--- 670,680 ----
if (driver_byte(result) & DRIVER_SENSE)
print_sense("sd", SCpnt);
+ /*
+ * Mark a single buffer as not uptodate. Queue the remainder.
+ * We sometimes get this cruft in the event that a medium error
+ * isn't properly reported.
+ */
SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
return;
}
***************
*** 702,707 ****
--- 808,838 ----
} else {
SDpnt->starved = 0;
}
+
+ /*
+ * FIXME(eric)
+ * I am not sure where the best place to do this is. We need
+ * to hook in a place where we are likely to come if in user
+ * space. Technically the error handling thread should be
+ * doing this crap, but the error handler isn't used by
+ * most hosts.
+ */
+ if (SDpnt->was_reset) {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ SDpnt->was_reset = 0;
+ if (SDpnt->removable && !in_interrupt()) {
+ spin_unlock_irq(&io_request_lock);
+ scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
+ spin_lock_irq(&io_request_lock);
+ continue;
+ }
+ }
+
/*
* Loop through all of the requests in this queue, and find
* one that is queueable.
***************
*** 768,797 ****
SDpnt->device_busy++;
/*
- * FIXME(eric)
- * I am not sure where the best place to do this is. We need
- * to hook in a place where we are likely to come if in user
- * space. Technically the error handling thread should be
- * doing this crap, but the error handler isn't used by
- * most hosts.
- */
- if (SDpnt->was_reset) {
- /*
- * We need to relock the door, but we might
- * be in an interrupt handler. Only do this
- * from user space, since we do not want to
- * sleep from an interrupt.
- */
- if (SDpnt->removable && !in_interrupt()) {
- spin_unlock_irq(&io_request_lock);
- scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0);
- SDpnt->was_reset = 0;
- spin_lock_irq(&io_request_lock);
- continue;
- }
- SDpnt->was_reset = 0;
- }
- /*
* Finally, before we release the lock, we copy the
* request to the command block, and remove the
* request from the request list. Note that we always
--- 899,904 ----
***************
*** 842,847 ****
--- 949,958 ----
* get those allocated here.
*/
if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+ SHpnt->host_busy--;
+ SDpnt->device_busy--;
+ scsi_end_request(SCpnt, 0,
+ SCpnt->request.nr_sectors);
spin_lock_irq(&io_request_lock);
continue;
}
***************
*** 849,854 ****
--- 960,970 ----
* Initialize the actual SCSI command for this request.
*/
if (!STpnt->init_command(SCpnt)) {
+ SHpnt->host_busy--;
+ SDpnt->device_busy--;
+ scsi_release_buffers(SCpnt);
+ scsi_end_request(SCpnt, 0,
+ SCpnt->request.nr_sectors);
spin_lock_irq(&io_request_lock);
continue;
}
Index: linux/drivers/scsi/scsi_merge.c
diff -c linux/drivers/scsi/scsi_merge.c:1.1.1.4 linux/drivers/scsi/scsi_merge.c:1.4
*** linux/drivers/scsi/scsi_merge.c:1.1.1.4 Thu Dec 30 00:14:01 1999
--- linux/drivers/scsi/scsi_merge.c Wed Dec 29 23:25:58 1999
***************
*** 61,66 ****
--- 61,74 ----
#include "constants.h"
#include <scsi/scsi_ioctl.h>
+ /*
+ * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
+ * Ultimately we should get away from using a dedicated DMA bounce buffer
+ * pool, and we should instead try and use kmalloc() instead. If we can
+ * eliminate this pool, then this restriction would no longer be needed.
+ */
+ #define DMA_SEGMENT_SIZE_LIMITED
+
#ifdef CONFIG_SCSI_DEBUG_QUEUES
/*
* Enable a bunch of additional consistency checking. Turn this off
***************
*** 97,108 ****
* This can be removed for optimization.
*/
#define SANITY_CHECK(req, _CLUSTER, _DMA) \
! if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) ) \
{ \
__label__ here; \
here: \
printk("Incorrect segment count at 0x%p", &&here); \
! dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \
}
#else
#define SANITY_CHECK(req, _CLUSTER, _DMA)
--- 105,116 ----
* This can be removed for optimization.
*/
#define SANITY_CHECK(req, _CLUSTER, _DMA) \
! if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) ) \
{ \
__label__ here; \
here: \
printk("Incorrect segment count at 0x%p", &&here); \
! dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL));
\
}
#else
#define SANITY_CHECK(req, _CLUSTER, _DMA)
***************
*** 166,171 ****
--- 174,181 ----
* dma_host - 1 if this host has ISA DMA issues (bus doesn't
* expose all of the address lines, so that DMA cannot
* be done from an arbitrary address).
+ * remainder - used to limit size of any given segment to
+ * PAGE_SIZE.
*
* Returns: Count of the number of SG segments for the request.
*
***************
*** 175,185 ****
*/
__inline static int __count_segments(struct request *req,
int use_clustering,
! int dma_host)
{
int ret = 1;
struct buffer_head *bh;
for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
if (use_clustering) {
/*
--- 185,201 ----
*/
__inline static int __count_segments(struct request *req,
int use_clustering,
! int dma_host,
! int * remainder)
{
int ret = 1;
+ int reqsize = 0;
struct buffer_head *bh;
+ if( remainder != NULL ) {
+ reqsize = *remainder;
+ }
+
for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
if (use_clustering) {
/*
***************
*** 191,207 ****
--- 207,249 ----
if (dma_host &&
virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
ret++;
+ reqsize = bh->b_size;
} else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
/*
* This one is OK. Let it go.
+ */
+ #ifdef DMA_SEGMENT_SIZE_LIMITED
+ /* Note scsi_malloc is only able to hand out
+ * chunks of memory in sizes of PAGE_SIZE or
+ * less. Thus we need to keep track of
+ * the size of the piece that we have
+ * seen so far, and if we have hit
+ * the limit of PAGE_SIZE, then we are
+ * kind of screwed and we need to start
+ * another segment.
*/
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >=
+ISA_DMA_THRESHOLD
+ && reqsize + bh->b_size > PAGE_SIZE )
+ {
+ ret++;
+ reqsize = bh->b_size;
+ continue;
+ }
+ #endif
+ reqsize += bh->b_size;
continue;
}
ret++;
+ reqsize = bh->b_size;
} else {
ret++;
+ reqsize = bh->b_size;
}
}
+ if( remainder != NULL ) {
+ *remainder = reqsize;
+ }
return ret;
}
***************
*** 239,245 ****
req->nr_segments = __count_segments(req,
CLUSTERABLE_DEVICE(SHpnt, SDpnt),
! SHpnt->unchecked_isa_dma);
}
/*
--- 281,287 ----
req->nr_segments = __count_segments(req,
CLUSTERABLE_DEVICE(SHpnt, SDpnt),
! SHpnt->unchecked_isa_dma, NULL);
}
/*
***************
*** 282,287 ****
--- 324,330 ----
int dma_host)
{
unsigned int sector, count;
+ unsigned int segment_size = 0;
Scsi_Device *SDpnt;
struct Scsi_Host *SHpnt;
***************
*** 334,339 ****
--- 377,393 ----
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+ #ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >=
+ISA_DMA_THRESHOLD ) {
+ segment_size = 0;
+ count = __count_segments(req, use_clustering,
+dma_host, &segment_size);
+ if( segment_size + bh->b_size > PAGE_SIZE )
+ {
+ goto new_segment;
+ }
+ }
+ #endif
/*
* This one is OK. Let it go.
*/
***************
*** 354,359 ****
--- 408,423 ----
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+ #ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >=
+ISA_DMA_THRESHOLD ) {
+ segment_size = bh->b_size;
+ count = __count_segments(req, use_clustering,
+dma_host, &segment_size);
+ if( count != req->nr_segments ) {
+ goto new_segment;
+ }
+ }
+ #endif
/*
* This one is OK. Let it go.
*/
***************
*** 477,482 ****
--- 541,565 ----
virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto dont_combine;
}
+ #ifdef DMA_SEGMENT_SIZE_LIMITED
+ /*
+ * We currently can only allocate scatter-gather bounce
+ * buffers in chunks of PAGE_SIZE or less.
+ */
+ if (dma_host
+ && CONTIGUOUS_BUFFERS(req->bhtail, next->bh)
+ && virt_to_phys(req->bhtail->b_data) - 1 >= ISA_DMA_THRESHOLD )
+ {
+ int segment_size = 0;
+ int count = 0;
+
+ count = __count_segments(req, use_clustering, dma_host,
+&segment_size);
+ count += __count_segments(next, use_clustering, dma_host,
+&segment_size);
+ if( count != req->nr_segments + next->nr_segments ) {
+ goto dont_combine;
+ }
+ }
+ #endif
if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
/*
* This one is OK. Let it go.
***************
*** 611,617 ****
* First we need to know how many scatter gather segments are needed.
*/
if (!sg_count_valid) {
! count = __count_segments(req, use_clustering, dma_host);
} else {
count = req->nr_segments;
}
--- 694,700 ----
* First we need to know how many scatter gather segments are needed.
*/
if (!sg_count_valid) {
! count = __count_segments(req, use_clustering, dma_host, NULL);
} else {
count = req->nr_segments;
}
***************
*** 672,685 ****
/* Nothing - fall through */
} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
/*
! * This one is OK. Let it go.
*/
! sgpnt[count - 1].length += bh->b_size;
! if (!dma_host) {
SCpnt->request_bufflen += bh->b_size;
}
- bhprev = bh;
- continue;
}
}
count++;
--- 755,783 ----
/* Nothing - fall through */
} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
/*
! * This one is OK. Let it go. Note that we
! * do not have the ability to allocate
! * bounce buffer segments > PAGE_SIZE, so
! * for now we limit the thing.
*/
! if( dma_host ) {
! #ifdef DMA_SEGMENT_SIZE_LIMITED
! if( sgpnt[count - 1].length + bh->b_size <=
PAGE_SIZE ) {
! sgpnt[count - 1].length += bh->b_size;
! bhprev = bh;
! continue;
! }
! #else
! sgpnt[count - 1].length += bh->b_size;
! bhprev = bh;
! continue;
! #endif
! } else {
! sgpnt[count - 1].length += bh->b_size;
SCpnt->request_bufflen += bh->b_size;
+ bhprev = bh;
+ continue;
}
}
}
count++;
Index: linux/drivers/scsi/scsi_obsolete.c
diff -c linux/drivers/scsi/scsi_obsolete.c:1.1.1.2
linux/drivers/scsi/scsi_obsolete.c:1.4
*** linux/drivers/scsi/scsi_obsolete.c:1.1.1.2 Sat Dec 18 17:49:41 1999
--- linux/drivers/scsi/scsi_obsolete.c Sat Jan 1 21:39:23 2000
***************
*** 1,5 ****
/*
! * scsi.c Copyright (C) 1992 Drew Eckhardt
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
*
* generic mid-level SCSI driver
--- 1,5 ----
/*
! * scsi_obsolete.c Copyright (C) 1992 Drew Eckhardt
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
*
* generic mid-level SCSI driver
***************
*** 87,95 ****
extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
! extern volatile struct Scsi_Host *host_active;
! #define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
! || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
static unsigned char generic_sense[6] =
{REQUEST_SENSE, 0, 0, 0, 255, 0};
--- 87,93 ----
extern int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt);
! #define SCSI_BLOCK(HOST) (HOST->can_queue && HOST->host_busy >= HOST->can_queue)
static unsigned char generic_sense[6] =
{REQUEST_SENSE, 0, 0, 0, 255, 0};
***************
*** 334,339 ****
--- 332,338 ----
int checked;
int oldto;
struct Scsi_Host *host = SCpnt->host;
+ Scsi_Device * device = SCpnt->device;
int result = SCpnt->result;
SCpnt->serial_number = 0;
SCpnt->serial_number_at_timeout = 0;
***************
*** 655,660 ****
--- 654,660 ----
printk("Calling done function - at address %p\n", SCpnt->done);
#endif
host->host_busy--; /* Indicate that we are free */
+ device->device_busy--; /* Decrement device usage counter. */
SCpnt->result = result | ((exit & 0xff) << 24);
SCpnt->use_sg = SCpnt->old_use_sg;
Index: linux/drivers/scsi/scsi_queue.c
diff -c linux/drivers/scsi/scsi_queue.c:1.1.1.3 linux/drivers/scsi/scsi_queue.c:1.3
*** linux/drivers/scsi/scsi_queue.c:1.1.1.3 Wed Dec 22 10:28:45 1999
--- linux/drivers/scsi/scsi_queue.c Sun Jan 2 23:45:05 2000
***************
*** 133,138 ****
--- 133,145 ----
cmd->bh_next = NULL;
/*
+ * Decrement the counters, since these commands are no longer
+ * active on the host/device.
+ */
+ cmd->host->host_busy--;
+ cmd->device->device_busy--;
+
+ /*
* Insert this command at the head of the queue for it's device.
* It will go before all other commands that are already in the queue.
*/
Index: linux/drivers/scsi/sd.c
diff -c linux/drivers/scsi/sd.c:1.1.1.4 linux/drivers/scsi/sd.c:1.7
*** linux/drivers/scsi/sd.c:1.1.1.4 Thu Dec 30 00:14:02 1999
--- linux/drivers/scsi/sd.c Sun Jan 2 23:45:05 2000
***************
*** 232,238 ****
--- 232,240 ----
{
int dev, devm, block, this_count;
Scsi_Disk *dpnt;
+ #if CONFIG_SCSI_LOGGING
char nbuff[6];
+ #endif
devm = SD_PARTITION(SCpnt->request.rq_dev);
dev = DEVICE_NR(SCpnt->request.rq_dev);
***************
*** 248,254 ****
!dpnt->device->online ||
block + SCpnt->request.nr_sectors > sd[devm].nr_sects) {
SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
SCpnt->request.nr_sectors));
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
return 0;
}
--- 250,255 ----
***************
*** 259,265 ****
* bit has been reset
*/
/* printk("SCSI disk has been changed. Prohibiting further I/O.\n");
*/
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
SCSI_LOG_HLQUEUE(2, sd_devname(devm, nbuff));
--- 260,265 ----
***************
*** 280,286 ****
if (dpnt->device->sector_size == 1024) {
if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
printk("sd.c:Bad block number requested");
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
} else {
block = block >> 1;
--- 280,285 ----
***************
*** 290,296 ****
if (dpnt->device->sector_size == 2048) {
if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
printk("sd.c:Bad block number requested");
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
} else {
block = block >> 2;
--- 289,294 ----
***************
*** 300,306 ****
switch (SCpnt->request.cmd) {
case WRITE:
if (!dpnt->device->writeable) {
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
SCpnt->cmnd[0] = WRITE_6;
--- 298,303 ----
***************
*** 520,526 ****
--- 517,525 ----
static void rw_intr(Scsi_Cmnd * SCpnt)
{
int result = SCpnt->result;
+ #if CONFIG_SCSI_LOGGING
char nbuff[6];
+ #endif
int this_count = SCpnt->bufflen >> 9;
int good_sectors = (result == 0 ? this_count : 0);
int block_sectors = 1;
***************
*** 658,677 ****
return retval;
}
- static void sd_wait_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
- void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
- int timeout, int retries)
- {
- DECLARE_MUTEX_LOCKED(sem);
-
- SCpnt->request.sem = &sem;
- SCpnt->request.rq_status = RQ_SCSI_BUSY;
- scsi_do_cmd(SCpnt, (void *) cmnd,
- buffer, bufflen, done, timeout, retries);
- down(&sem);
- SCpnt->request.sem = NULL;
- }
-
static void sd_init_done(Scsi_Cmnd * SCpnt)
{
struct request *req;
--- 657,662 ----
***************
*** 729,735 ****
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! sd_wait_cmd (SCpnt, (void *) cmd, (void *) buffer,
0/*512*/, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
--- 714,720 ----
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! scsi_wait_cmd (SCpnt, (void *) cmd, (void *) buffer,
0/*512*/, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
***************
*** 755,761 ****
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
512, sd_init_done, SD_TIMEOUT,
MAX_RETRIES);
}
spintime = 1;
--- 740,746 ----
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
512, sd_init_done, SD_TIMEOUT,
MAX_RETRIES);
}
spintime = 1;
***************
*** 785,791 ****
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
--- 770,776 ----
SCpnt->sense_buffer[0] = 0;
SCpnt->sense_buffer[2] = 0;
! scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
8, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
***************
*** 936,942 ****
SCpnt->sense_buffer[2] = 0;
/* same code as READCAPA !! */
! sd_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
--- 921,927 ----
SCpnt->sense_buffer[2] = 0;
/* same code as READCAPA !! */
! scsi_wait_cmd(SCpnt, (void *) cmd, (void *) buffer,
512, sd_init_done, SD_TIMEOUT, MAX_RETRIES);
the_result = SCpnt->result;
Index: linux/drivers/scsi/sr.c
diff -c linux/drivers/scsi/sr.c:1.1.1.3 linux/drivers/scsi/sr.c:1.3
*** linux/drivers/scsi/sr.c:1.1.1.3 Wed Dec 22 10:28:47 1999
--- linux/drivers/scsi/sr.c Sun Jan 2 23:45:05 2000
***************
*** 267,273 ****
/*
* Umm, yeah, right. Swapping to a cdrom. Nice try.
*/
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %d, block = %d\n", devm,
block));
--- 267,272 ----
***************
*** 276,282 ****
!scsi_CDs[dev].device ||
!scsi_CDs[dev].device->online) {
SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
SCpnt->request.nr_sectors));
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
return 0;
}
--- 275,280 ----
***************
*** 286,292 ****
* bit has been reset
*/
/* printk("SCSI disk has been changed. Prohibiting further I/O.\n");
*/
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
/*
--- 284,289 ----
***************
*** 300,313 ****
printk("sr: can't switch blocksize: in interrupt\n");
}
if (SCpnt->request.cmd == WRITE) {
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
if (scsi_CDs[dev].device->sector_size == 1024) {
if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
printk("sr.c:Bad 1K block number requested (%d %ld)",
block, SCpnt->request.nr_sectors);
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
} else {
block = block >> 1;
--- 297,308 ----
***************
*** 318,324 ****
if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
printk("sr.c:Bad 2K block number requested (%d %ld)",
block, SCpnt->request.nr_sectors);
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
} else {
block = block >> 2;
--- 313,318 ----
***************
*** 328,334 ****
switch (SCpnt->request.cmd) {
case WRITE:
if (!scsi_CDs[dev].device->writeable) {
- SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
}
SCpnt->cmnd[0] = WRITE_10;
--- 322,327 ----