For those of you on linux-scsi, the 2.3.32-pre3 release does contain the
new SCSI queueing code. I have no illusions about this being bug-free,
but I believe that the most serious problems are resolved. Please be
patient as we iron out the remaining difficulties. FWIW, I have been
running this kernel on my development machine for a while now.
There are a few known issues you might want to watch out
for:
1) Doug Gilbert is having trouble with deadlocks if he uses sg and
sd at the same time (both pointing to the same device).
2) I have seen strange failures in the new error handling code
whereby the eh_wait field of the host structure gets zeroed by someone as
yet unidentified. The only cases where I have seen this happen involve
reading a cdrom with many bad sectors; after the drive has cranked on the
disc for a while, the timeout function is unable to wake up the error
handler thread (because the eh_wait semaphore pointer got nuked).
3) The bare 2.3.32-pre3 does have a bug that comes up if you ever
do get a MEDIUM_ERROR. The scatter-gather tables are not regenerated when
we go back to attempt to read the sectors past the bad sector. This bug
is fixed in the enclosed patchkit.
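To make issue 3 a little more concrete, here is a toy userspace sketch of
why the leftover piece of a partially completed request has to have its
segment count recomputed. This is an illustration only - struct toy_buf,
count_segments() and the addresses are all made up; the real fix is the
recount_segments() call in the patch below.

/*
 * Toy model only, not kernel code.  Each toy_buf stands in for a buffer
 * head on the request; "clustering" merges buffers whose physical
 * addresses happen to be contiguous into one scatter-gather segment.
 */
#include <stdio.h>

struct toy_buf {
	unsigned long phys;	/* pretend physical address */
	unsigned int len;	/* length in bytes */
};

/* Count SG segments over buffers [first, nbuf), merging contiguous ones. */
static int count_segments(const struct toy_buf *b, int first, int nbuf)
{
	int i, segs = 0;

	for (i = first; i < nbuf; i++)
		if (i == first || b[i - 1].phys + b[i - 1].len != b[i].phys)
			segs++;		/* discontiguous - new segment */
	return segs;
}

int main(void)
{
	/* Four 1K buffers: 0+1 are physically contiguous, as are 2+3. */
	struct toy_buf bufs[] = {
		{ 0x100000, 1024 },
		{ 0x100400, 1024 },
		{ 0x200000, 1024 },
		{ 0x200400, 1024 },
	};
	int nbuf = 4;
	int stale = count_segments(bufs, 0, nbuf);

	/* The whole request clusters into 2 segments. */
	printf("segments for the whole request: %d\n", stale);

	/*
	 * Now pretend a MEDIUM_ERROR "completed" the first two buffers
	 * (everything up to and including the bad sector).  The leftover
	 * clusters into 1 segment, so building the retry's scatter-gather
	 * table from the stale count of 2 yields a table that no longer
	 * matches the buffers still hanging off the request.
	 */
	printf("stale count reused for the retry: %d\n", stale);
	printf("recounted for the leftover:       %d\n",
	       count_segments(bufs, 2, nbuf));
	return 0;
}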
I am enclosing some diffs against this release. These contain the
bugfix I mentioned above. I also got around to cleaning up the locking
in the new SCSI error handling code - I convinced myself that there was
no reason to be so aggressive about holding io_request_lock while error
recovery is underway (the few races that did exist have been dealt with,
I believe). I can elaborate if anyone has any questions.
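In case it helps to see the shape of the locking change outside of the
diffs, here is a rough userspace analogy (a pthread mutex standing in for
io_request_lock, a POSIX semaphore for the eh_action semaphore that
scsi_send_eh_cmnd() sleeps on; the fake_* names are invented): the error
handler now runs unlocked and only takes the lock around the low-level
driver entry points, so it can sleep for completions without holding
io_request_lock.

/*
 * Userspace analogy only, not the kernel code.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t io_request_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t eh_action;

/* Stand-in for host->hostt->queuecommand(); called with the lock held. */
static void fake_queuecommand(void)
{
	printf("driver: error-recovery command queued\n");
}

/* Stand-in for the completion path that ends up in scsi_eh_done(). */
static void *fake_completion(void *unused)
{
	(void) unused;
	sleep(1);			/* the hardware takes its time */
	sem_post(&eh_action);		/* kernel: up(host->eh_action) */
	return NULL;
}

int main(void)
{
	pthread_t intr;

	sem_init(&eh_action, 0, 0);
	pthread_create(&intr, NULL, fake_completion, NULL);

	/*
	 * New shape: take the lock only for the low-level entry point.
	 * (The old code held it across the whole recovery path and had
	 * to drop it by hand around every sleep.)
	 */
	pthread_mutex_lock(&io_request_lock);
	fake_queuecommand();
	pthread_mutex_unlock(&io_request_lock);

	/* Sleep for the result without holding the lock. */
	sem_wait(&eh_action);
	printf("error handler: command finished\n");

	pthread_join(intr, NULL);
	sem_destroy(&eh_action);
	return 0;
}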
-Eric
"The world was a library, and its books were the stones, leaves,
brooks, grass, and the birds of the earth. We learned to do what only
a student of nature ever learns, and that was to feel beauty."
Chief Luther Standing Bear - Teton Sioux
diff -u -r ./drivers/scsi/aha1542.c ../changed/drivers/scsi/aha1542.c
--- ./drivers/scsi/aha1542.c Mon Dec 13 09:23:46 1999
+++ ../changed/drivers/scsi/aha1542.c Sun Dec 12 01:23:31 1999
@@ -46,11 +46,33 @@
#define SCSI_PA(address) virt_to_bus(address)
-#define BAD_DMA(msg, address, length) \
- { \
- printk(KERN_CRIT "%s address %p length %d\n", msg, address, length); \
- panic("Buffer at physical address > 16Mb used for aha1542"); \
- }
+static void BAD_DMA(void * address, unsigned int length)
+{
+ printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n",
+ address,
+ SCSI_PA(address),
+ length);
+ panic("Buffer at physical address > 16Mb used for aha1542");
+}
+
+static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
+ struct scatterlist * sgpnt,
+ int nseg,
+ int badseg)
+{
+ printk(KERN_CRIT "sgpnt[%d:%d] addr %p/0x%lx alt %p/0x%lx length %d\n",
+ badseg, nseg,
+ sgpnt[badseg].address,
+ SCSI_PA(sgpnt[badseg].address),
+ sgpnt[badseg].alt_address,
+ sgpnt[badseg].alt_address ? SCSI_PA(sgpnt[badseg].alt_address) : 0,
+ sgpnt[badseg].length);
+
+ /*
+ * Not safe to continue.
+ */
+ panic("Buffer at physical address > 16Mb used for aha1542");
+}
#include<linux/stat.h>
@@ -655,7 +677,7 @@
};
any2scsi(cptr[i].dataptr, SCSI_PA(sgpnt[i].address));
if(SCSI_PA(sgpnt[i].address+sgpnt[i].length-1) > ISA_DMA_THRESHOLD)
- BAD_DMA("sgpnt", sgpnt[i].address, sgpnt[i].length);
+ BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
any2scsi(cptr[i].datalen, sgpnt[i].length);
};
any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
@@ -670,7 +692,7 @@
SCpnt->host_scribble = NULL;
any2scsi(ccb[mbo].datalen, bufflen);
if(buff && SCSI_PA(buff+bufflen-1) > ISA_DMA_THRESHOLD)
- BAD_DMA("buff", buff, bufflen);
+ BAD_DMA(buff, bufflen);
any2scsi(ccb[mbo].dataptr, SCSI_PA(buff));
};
ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/
diff -u -r ./drivers/scsi/scsi.c ../changed/drivers/scsi/scsi.c
--- ./drivers/scsi/scsi.c Mon Dec 13 09:23:48 1999
+++ ../changed/drivers/scsi/scsi.c Sun Dec 12 10:22:59 1999
@@ -1137,12 +1134,18 @@
* to complete */
atomic_inc(&SCpnt->host->host_active);
+ SCpnt->buffer = NULL;
+ SCpnt->bufflen = 0;
+ SCpnt->request_buffer = NULL;
+ SCpnt->request_bufflen = 0;
+
SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
SCpnt->old_use_sg = 0;
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
SCpnt->underflow = 0; /* Do not flag underflow conditions */
+ SCpnt->resid = 0;
SCpnt->state = SCSI_STATE_INITIALIZING;
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
@@ -1480,6 +1483,7 @@
* etc, etc.
*/
if (!tstatus) {
+ SCpnt->done_late = 1;
return;
}
/* Set the serial numbers back to zero */
diff -u -r ./drivers/scsi/scsi.h ../changed/drivers/scsi/scsi.h
--- ./drivers/scsi/scsi.h Mon Dec 13 09:23:48 1999
+++ ../changed/drivers/scsi/scsi.h Sun Dec 12 10:38:02 1999
@@ -398,6 +398,11 @@
unsigned int *secs);
/*
+ * Prototypes for functions in scsi_merge.c
+ */
+extern void recount_segments(Scsi_Cmnd * SCpnt);
+
+/*
* Prototypes for functions in scsi_lib.c
*/
extern void initialize_merge_fn(Scsi_Device * SDpnt);
@@ -625,6 +630,14 @@
unsigned flags;
+ /*
+ * Used to indicate that a command which has timed out also
+ * completed normally. Typically the completion function will
+ * do nothing but set this flag in this instance because the
+ * timeout handler is already running.
+ */
+ unsigned done_late:1;
+
/*
* These two flags are used to track commands that are in the
* mid-level queue. The idea is that a command can be there for
@@ -634,11 +647,6 @@
*/
unsigned host_wait:1;
unsigned device_wait:1;
-
- /* These variables are for the cdrom only. Once we have variable size
- * buffers in the buffer cache, they will go away. */
- int this_count;
- /* End of special cdrom variables */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
diff -u -r ./drivers/scsi/scsi_error.c ../changed/drivers/scsi/scsi_error.c
--- ./drivers/scsi/scsi_error.c Mon Dec 13 09:23:48 1999
+++ ../changed/drivers/scsi/scsi_error.c Sun Dec 12 10:41:30 1999
@@ -117,6 +117,8 @@
SCset->eh_timeout.expires = jiffies + timeout;
SCset->eh_timeout.function = (void (*)(unsigned long)) complete;
+ SCset->done_late = 0;
+
SCSI_LOG_ERROR_RECOVERY(5, printk("Adding timer for command %p at %d (%p)\n",
SCset, timeout, complete));
add_timer(&SCset->eh_timeout);
@@ -159,11 +161,14 @@
*
* Returns: Nothing.
*
- * Notes:
+ * Notes: We do not need to lock this. There is the potential for
+ * a race only in that the normal completion handling might
+ * run, but if the normal completion function determines
+ * that the timer has already fired, then it mustn't do
+ * anything.
*/
-static void do_scsi_times_out(Scsi_Cmnd * SCpnt)
+void scsi_times_out(Scsi_Cmnd * SCpnt)
{
-
/*
* Notify the low-level code that this operation failed and we are
* reposessing the command.
@@ -220,19 +225,13 @@
* command that might have failed. If so, wake up the error handler.
*/
if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
+ if( SCpnt->host->eh_wait == NULL ) {
+ panic("Error handler thread not present");
+ }
up(SCpnt->host->eh_wait);
}
}
-void scsi_times_out(Scsi_Cmnd * SCpnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
- do_scsi_times_out(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
-}
-
/*
* Function scsi_block_when_processing_errors
*
@@ -274,11 +273,8 @@
STATIC
void scsi_eh_times_out(Scsi_Cmnd * SCpnt)
{
- unsigned long flags;
int rtn = FAILED;
- spin_lock_irqsave(&io_request_lock, flags);
-
SCpnt->eh_state = SCSI_STATE_TIMEOUT;
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
@@ -298,7 +294,6 @@
up(SCpnt->host->eh_action);
else
printk("Missing scsi error handler thread\n");
- spin_unlock_irqrestore(&io_request_lock, flags);
}
@@ -319,6 +314,20 @@
STATIC
void scsi_eh_done(Scsi_Cmnd * SCpnt)
{
+ int rtn;
+
+ /*
+ * If the timeout handler is already running, then just set the
+ * flag which says we finished late, and return. We have no
+ * way of stopping the timeout handler from running, so we must
+ * always defer to it.
+ */
+ rtn = del_timer(&SCpnt->eh_timeout);
+ if (!rtn) {
+ SCpnt->done_late = 1;
+ return;
+ }
+
SCpnt->request.rq_status = RQ_SCSI_DONE;
SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
@@ -418,7 +427,7 @@
{REQUEST_SENSE, 0, 0, 0, 255, 0};
unsigned char scsi_result0[256], *scsi_result = NULL;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
memcpy((void *) SCpnt->cmnd, (void *) generic_sense,
sizeof(generic_sense));
@@ -426,7 +435,7 @@
SCpnt->cmnd[1] = SCpnt->lun << 5;
scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma)
- ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA);
if (scsi_result == NULL) {
printk("cannot allocate scsi_result in scsi_request_sense.\n");
@@ -455,7 +464,7 @@
sizeof(SCpnt->sense_buffer));
if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
- scsi_init_free(scsi_result, 512);
+ kfree(scsi_result);
/*
* When we eventually call scsi_finish, we really wish to complete
@@ -492,7 +501,7 @@
SCpnt->cmnd[1] = SCpnt->lun << 5;
scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma)
- ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA);
if (scsi_result == NULL) {
printk("cannot allocate scsi_result in scsi_test_unit_ready.\n");
@@ -520,7 +529,7 @@
sizeof(SCpnt->sense_buffer));
if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
- scsi_init_free(scsi_result, 512);
+ kfree(scsi_result);
/*
* When we eventually call scsi_finish, we really wish to complete
@@ -582,9 +591,10 @@
*/
STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout)
{
+ unsigned long flags;
struct Scsi_Host *host;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
host = SCpnt->host;
@@ -608,15 +618,14 @@
SCpnt->host->eh_action = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ spin_lock_irqsave(&io_request_lock, flags);
host->hostt->queuecommand(SCpnt, scsi_eh_done);
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
down(&sem);
- spin_lock_irq(&io_request_lock);
SCpnt->host->eh_action = NULL;
- del_timer(&SCpnt->eh_timeout);
-
/*
* See if timeout. If so, tell the host to forget about it.
* In other words, we don't want a callback any more.
@@ -634,7 +643,10 @@
* protection here, since we would end up waiting in the actual low
* level driver, we don't know how to wake it up.
*/
+ spin_lock_irqsave(&io_request_lock, flags);
temp = host->hostt->command(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
SCpnt->result = temp;
if (scsi_eh_completed_normally(SCpnt)) {
SCpnt->eh_state = SUCCESS;
@@ -734,6 +746,9 @@
*/
STATIC int scsi_try_to_abort_command(Scsi_Cmnd * SCpnt, int timeout)
{
+ int rtn;
+ unsigned long flags;
+
SCpnt->eh_state = FAILED; /* Until we come up with something better */
if (SCpnt->host->hostt->eh_abort_handler == NULL) {
@@ -748,7 +763,10 @@
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
- return SCpnt->host->hostt->eh_abort_handler(SCpnt);
+ spin_lock_irqsave(&io_request_lock, flags);
+ rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return rtn;
}
/*
@@ -767,6 +785,7 @@
*/
STATIC int scsi_try_bus_device_reset(Scsi_Cmnd * SCpnt, int timeout)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -776,7 +795,9 @@
}
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_device_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -796,6 +817,7 @@
*/
STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -805,7 +827,10 @@
if (SCpnt->host->hostt->eh_bus_reset_handler == NULL) {
return FAILED;
}
+
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_bus_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -814,9 +839,7 @@
* If we had a successful bus reset, mark the command blocks to expect
* a condition code of unit attention.
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(BUS_RESET_SETTLE_TIME);
- spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -841,6 +864,7 @@
*/
STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -850,7 +874,9 @@
if (SCpnt->host->hostt->eh_host_reset_handler == NULL) {
return FAILED;
}
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_host_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -859,9 +885,7 @@
* If we had a successful host reset, mark the command blocks to expect
* a condition code of unit attention.
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(HOST_RESET_SETTLE_TIME);
- spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -1258,7 +1282,7 @@
Scsi_Cmnd *SCdone;
int timed_out;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
SCdone = NULL;
@@ -1539,14 +1563,7 @@
* is the case, we are worrying about nothing here.
*/
- /*
- * Due to the spinlock, we will never get out of this
- * loop without a proper wait (DB)
- */
- spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
- spin_lock_irq(&io_request_lock);
-
goto next_device;
}
}
@@ -1638,9 +1655,7 @@
* Due to the spinlock, we will never get out of this
* loop without a proper wait. (DB)
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
- spin_lock_irq(&io_request_lock);
goto next_device2;
}
@@ -1784,7 +1799,6 @@
struct Scsi_Host *host = (struct Scsi_Host *) data;
int rtn;
DECLARE_MUTEX_LOCKED(sem);
- unsigned long flags;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
@@ -1828,7 +1842,6 @@
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler waking up\n"));
- spin_lock_irqsave(&io_request_lock, flags);
host->eh_active = 1;
/*
@@ -1843,9 +1856,6 @@
}
host->eh_active = 0;
-
- /* The spinlock is really needed up to this point. (DB) */
- spin_unlock_irqrestore(&io_request_lock, flags);
/*
* Note - if the above fails completely, the action is to take
diff -u -r ./drivers/scsi/scsi_lib.c ../changed/drivers/scsi/scsi_lib.c
--- ./drivers/scsi/scsi_lib.c Mon Dec 13 09:23:48 1999
+++ ../changed/drivers/scsi/scsi_lib.c Sun Dec 12 01:24:36 1999
@@ -383,6 +383,16 @@
scsi_free(SCpnt->buffer, SCpnt->bufflen);
}
}
+
+ /*
+ * Zero these out. They now point to freed memory, and it is
+ * dangerous to hang onto the pointers.
+ */
+ SCpnt->buffer = NULL;
+ SCpnt->bufflen = 0;
+ SCpnt->request_buffer = NULL;
+ SCpnt->request_bufflen = 0;
+
/*
* Next deal with any sectors which we were able to correctly
* handle.
@@ -630,9 +640,14 @@
/*
* Find the actual device driver associated with this command.
* The SPECIAL requests are things like character device or
- * ioctls, which did not originate from ll_rw_blk.
+ * ioctls, which did not originate from ll_rw_blk. Note that
+ * the special field is also used to indicate the SCpnt for
+ * the remainder of a partially fulfilled request that can
+ * come up when there is a medium error. We have to treat
+ * these two cases differently. We differentiate by looking
+ * at request.cmd, as this tells us the real story.
*/
- if (req->special != NULL) {
+ if (req->cmd == SPECIAL) {
STpnt = NULL;
SCpnt = (Scsi_Cmnd *) req->special;
} else {
@@ -643,7 +658,20 @@
/*
* Now try and find a command block that we can use.
*/
- SCpnt = scsi_allocate_device(SDpnt, FALSE);
+ if( req->special != NULL ) {
+ SCpnt = (Scsi_Cmnd *) req->special;
+ /*
+ * We need to recount the number of
+ * scatter-gather segments here - the
+ * normal case code assumes this to be
+ * correct, as it would be a performance
+ * loss to always recount. Handling
+ * errors is always unusual, of course.
+ */
+ recount_segments(SCpnt);
+ } else {
+ SCpnt = scsi_allocate_device(SDpnt, FALSE);
+ }
/*
* If so, we are ready to do something. Bump the count
* while the queue is locked and then break out of the loop.
@@ -689,8 +717,9 @@
* in this queue are for the same device.
*/
q->current_request = req->next;
+ SCpnt->request.next = NULL;
- if (req->special == NULL) {
+ if (req != &SCpnt->request) {
memcpy(&SCpnt->request, req, sizeof(struct request));
/*
@@ -702,13 +731,15 @@
wake_up(&wait_for_request);
}
/*
- * Now it is finally safe to release the lock. We are not going
- * to noodle the request list until this request has been queued
- * and we loop back to queue another.
+ * Now it is finally safe to release the lock. We are
+ * not going to noodle the request list until this
+ * request has been queued and we loop back to queue
+ * another.
*/
+ req = NULL;
spin_unlock_irq(&io_request_lock);
- if (req->special == NULL) {
+ if (SCpnt->request.cmd != SPECIAL) {
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
diff -u -r ./drivers/scsi/scsi_merge.c ../changed/drivers/scsi/scsi_merge.c
--- ./drivers/scsi/scsi_merge.c Mon Dec 13 09:23:48 1999
+++ ../changed/drivers/scsi/scsi_merge.c Sun Dec 12 01:27:27 1999
@@ -77,15 +77,15 @@
* Dump the information that we have. We know we have an
* inconsistency.
*/
- printk("nr_segments is %lx\n", req->nr_segments);
+ printk("nr_segments is %x\n", req->nr_segments);
printk("counted segments is %x\n", segments);
printk("Flags %d %d\n", use_clustering, dma_host);
for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
- printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
+ printk("Segment 0x%x, blocks %d, addr 0x%x\n",
bh,
bh->b_size >> 9,
- virt_to_phys(bh->b_data - 1));
+ virt_to_phys(bh->b_data) - 1);
}
panic("Ththththaats all folks. Too dangerous to continue.\n");
}
@@ -101,7 +101,7 @@
{ \
__label__ here; \
here: \
- printk("Incorrect segment count at 0x%p", &&here); \
+ printk("Incorrect segment count at 0x%x", &&here); \
dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \
}
#else
@@ -155,7 +155,7 @@
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
ret++;
} else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
/*
@@ -172,6 +172,43 @@
}
/*
+ * Function: recount_segments()
+ *
+ * Purpose: Recount the number of scatter-gather segments for this request.
+ *
+ * Arguments: req - request that needs recounting.
+ *
+ * Returns: Count of the number of SG segments for the request.
+ *
+ * Lock status: Irrelevant.
+ *
+ * Notes: This is only used when we have partially completed requests
+ * and the bit that is leftover is of an indeterminate size.
+ * This can come up if you get a MEDIUM_ERROR, for example,
+ * as we will have "completed" all of the sectors up to and
+ * including the bad sector, and the leftover bit is what
+ * we have to do now. This tends to be a rare occurrence, so
+ * we aren't busting our butts to instantiate separate versions
+ * of this function for the 4 different flag values. We
+ * probably should, however.
+ */
+void
+recount_segments(Scsi_Cmnd * SCpnt)
+{
+ struct request *req;
+ struct Scsi_Host *SHpnt;
+ Scsi_Device * SDpnt;
+
+ req = &SCpnt->request;
+ SHpnt = SCpnt->host;
+ SDpnt = SCpnt->device;
+
+ req->nr_segments = __count_segments(req,
+ CLUSTERABLE_DEVICE(SHpnt, SDpnt),
+ SHpnt->unchecked_isa_dma);
+}
+
+/*
* Function: __scsi_merge_fn()
*
* Purpose: Prototype for queue merge function.
@@ -235,7 +272,7 @@
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
@@ -255,7 +292,7 @@
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
@@ -379,7 +416,7 @@
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto dont_combine;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
@@ -572,7 +609,7 @@
bh; bh = bh->b_reqnext) {
if (use_clustering && bhprev != NULL) {
if (dma_host &&
- virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
/* Nothing - fall through */
} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
/*
@@ -611,7 +648,7 @@
for (i = 0; i < count; i++) {
SCpnt->request_bufflen += sgpnt[i].length;
if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
- ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) {
+ ISA_DMA_THRESHOLD) {
sgpnt[i].alt_address = sgpnt[i].address;
sgpnt[i].address =
(char *) scsi_malloc(sgpnt[i].length);
diff -u -r ./drivers/scsi/sr.c ../changed/drivers/scsi/sr.c
--- ./drivers/scsi/sr.c Mon Dec 13 09:23:49 1999
+++ ../changed/drivers/scsi/sr.c Fri Dec 10 20:07:10 1999
@@ -302,7 +302,7 @@
}
if (scsi_CDs[dev].device->sector_size == 1024) {
if ((block & 1) || (SCpnt->request.nr_sectors & 1)) {
- printk("sr.c:Bad 1K block number requested (%d %ld)",
+ printk("sr.c:Bad 1K block number requested (%d %d)",
block, SCpnt->request.nr_sectors);
SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;
@@ -313,7 +313,7 @@
}
if (scsi_CDs[dev].device->sector_size == 2048) {
if ((block & 3) || (SCpnt->request.nr_sectors & 3)) {
- printk("sr.c:Bad 2K block number requested (%d %ld)",
+ printk("sr.c:Bad 2K block number requested (%d %d)",
block, SCpnt->request.nr_sectors);
SCpnt = scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors);
return 0;