[APPENDIX PATCH 01/13] block: don't call __end_that_request_first() for clone

2008-02-15 Thread Kiyoshi Ueda
This patch adds a flag to indicate the request is a clone and
avoids __end_that_request_first() call for cloned requests
in blk_end_io().
So request-based dm can use blk_end_io() to complete clones
when dm doesn't want the data in the clones to be completed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-core.c   |2 +-
 include/linux/blkdev.h |3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

Index: 2.6.25-rc1/block/blk-core.c
===
--- 2.6.25-rc1.orig/block/blk-core.c
+++ 2.6.25-rc1/block/blk-core.c
@@ -1880,7 +1880,7 @@ static int blk_end_io(struct request *rq
struct request_queue *q = rq-q;
unsigned long flags = 0UL;
 
-   if (blk_fs_request(rq) || blk_pc_request(rq)) {
+   if ((blk_fs_request(rq) || blk_pc_request(rq))  !blk_cloned_rq(rq)) {
if (__end_that_request_first(rq, error, nr_bytes))
return 1;
 
Index: 2.6.25-rc1/include/linux/blkdev.h
===
--- 2.6.25-rc1.orig/include/linux/blkdev.h
+++ 2.6.25-rc1/include/linux/blkdev.h
@@ -115,6 +115,7 @@ enum rq_flag_bits {
__REQ_RW_SYNC,  /* request is sync (O_DIRECT) */
__REQ_ALLOCED,  /* request came from our alloc pool */
__REQ_RW_META,  /* metadata io request */
+   __REQ_CLONED,   /* request is a clone of another request */
__REQ_NR_BITS,  /* stops here */
 };
 
@@ -136,6 +137,7 @@ enum rq_flag_bits {
 #define REQ_RW_SYNC(1  __REQ_RW_SYNC)
 #define REQ_ALLOCED(1  __REQ_ALLOCED)
 #define REQ_RW_META(1  __REQ_RW_META)
+#define REQ_CLONED (1  __REQ_CLONED)
 
 #define BLK_MAX_CDB16
 
@@ -500,6 +502,7 @@ enum {
 #define blk_sorted_rq(rq)  ((rq)-cmd_flags  REQ_SORTED)
 #define blk_barrier_rq(rq) ((rq)-cmd_flags  REQ_HARDBARRIER)
 #define blk_fua_rq(rq) ((rq)-cmd_flags  REQ_FUA)
+#define blk_cloned_rq(rq)  ((rq)-cmd_flags  REQ_CLONED)
 #define blk_bidi_rq(rq)((rq)-next_rq != NULL)
 #define blk_empty_barrier(rq)  (blk_barrier_rq(rq)  blk_fs_request(rq)  
!(rq)-hard_nr_sectors)
 /* rq-queuelist of dequeued request must be list_empty() */
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 05/13] dm: remove dead codes

2008-02-15 Thread Kiyoshi Ueda
This patch removes dead code for the noflush suspend.
No functional change.

This patch is just a clean-up of the code and is not functionally
related to request-based dm, but it is included here due to a textual
(patch-context) dependency.

The dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL) in dm_suspend()
is never invoked because:
  - The 'goto flush_and_out' is the same as 'goto out', because
    'goto flush_and_out' is reached only when '!noflush'
  - If 'r && noflush' is true, the interrupt check code above
    has already been invoked and we 'goto out'

The DM_WQ_FLUSH_ALL type is used only in dm_suspend(),
so it is no longer needed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm.c |   14 +-
 1 files changed, 1 insertion(+), 13 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -76,7 +76,6 @@ union map_info *dm_get_mapinfo(struct bi
  */
 struct dm_wq_req {
enum {
-   DM_WQ_FLUSH_ALL,
DM_WQ_FLUSH_DEFERRED,
} type;
struct work_struct work;
@@ -1340,9 +1339,6 @@ static void dm_wq_work(struct work_struc
 
down_write(md-io_lock);
switch (req-type) {
-   case DM_WQ_FLUSH_ALL:
-   __merge_pushback_list(md);
-   /* pass through */
case DM_WQ_FLUSH_DEFERRED:
__flush_deferred_io(md);
break;
@@ -1472,7 +1468,7 @@ int dm_suspend(struct mapped_device *md,
if (!md-suspended_bdev) {
DMWARN(bdget failed in dm_suspend);
r = -ENOMEM;
-   goto flush_and_out;
+   goto out;
}
 
/*
@@ -1523,14 +1519,6 @@ int dm_suspend(struct mapped_device *md,
 
set_bit(DMF_SUSPENDED, md-flags);
 
-flush_and_out:
-   if (r  noflush)
-   /*
-* Because there may be already I/Os in the pushback list,
-* flush them before return.
-*/
-   dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
 out:
if (r  md-suspended_bdev) {
bdput(md-suspended_bdev);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 02/13] block: add request submission interface

2008-02-15 Thread Kiyoshi Ueda
This patch adds a generic request submission interface for request
stacking drivers so that request-based dm can use it to submit
clones to underlying devices.

The request may have been built based on the limitations of other
queues, so generic limitation checks against the submitting queue are needed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-core.c   |   65 +
 include/linux/blkdev.h |1 
 2 files changed, 66 insertions(+)

Index: 2.6.25-rc1/block/blk-core.c
===
--- 2.6.25-rc1.orig/block/blk-core.c
+++ 2.6.25-rc1/block/blk-core.c
@@ -1510,6 +1510,71 @@ void submit_bio(int rw, struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio);
 
+/*
+ * Check a request for queue limits
+ */
+static int check_queue_limit(struct request_queue *q, struct request *rq)
+{
+   if (rq-nr_sectors  q-max_sectors ||
+   rq-data_len  9  q-max_hw_sectors) {
+   printk(KERN_ERR %s: over max size limit.\n, __func__);
+   return 1;
+   }
+
+   /*
+* queue's settings related to segment counting like q-bounce_pfn
+* may differ from that of other stacking queues.
+* Recalculate it to check the request correctly on this queue's
+* limitation.
+*/
+   blk_recalc_rq_segments(rq);
+   if (rq-nr_phys_segments  q-max_phys_segments ||
+   rq-nr_hw_segments  q-max_hw_segments) {
+   printk(KERN_ERR %s: over max segments limit.\n, __func__);
+   return 1;
+   }
+
+   return 0;
+}
+
+/**
+ * blk_submit_request - Helper for stacking drivers to submit the request
+ * @q:  the queue to submit the request
+ * @rq: the request being queued
+ **/
+void blk_submit_request(struct request_queue *q, struct request *rq)
+{
+   unsigned long flags;
+
+   if (check_queue_limit(q, rq))
+   goto end_io;
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+   if (rq-rq_disk  rq-rq_disk-flags  GENHD_FL_FAIL 
+   should_fail(fail_make_request, blk_rq_bytes(rq)))
+   goto end_io;
+#endif
+
+   spin_lock_irqsave(q-queue_lock, flags);
+
+   /*
+* Submitting request must be dequeued before calling this function
+* because it will be linked to another request_queue
+*/
+   BUG_ON(blk_queued_rq(rq));
+
+   drive_stat_acct(rq, 1);
+   __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+
+   spin_unlock_irqrestore(q-queue_lock, flags);
+
+   return;
+
+end_io:
+   blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_submit_request);
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req:  the request being processed
Index: 2.6.25-rc1/include/linux/blkdev.h
===
--- 2.6.25-rc1.orig/include/linux/blkdev.h
+++ 2.6.25-rc1/include/linux/blkdev.h
@@ -616,6 +616,7 @@ extern void blk_end_sync_rq(struct reque
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, 
void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_submit_request(struct request_queue *q, struct request *rq);
 extern void blk_plug_device(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 10/13] dm: enable request-based dm

2008-02-15 Thread Kiyoshi Ueda
This patch enables request-based dm.

Request-based dm and bio-based dm coexist.
There are some limitations between them.
  - OK: bio-based dm device on bio-based dm device
  - OK: bio-based dm device on request-based dm device
  - OK: request-based dm device on request-based dm device
  - NG: request-based dm device on bio-based dm device

The type of a dm device is decided at the first table loading time.
Until then, mempool creations and queue initializations are deferred.
Once the type of a dm device is decided, the type can't be changed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm-table.c |   54 +
 drivers/md/dm.c   |  154 ++
 drivers/md/dm.h   |6 +
 3 files changed, 179 insertions(+), 35 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm-table.c
===
--- 2.6.25-rc1.orig/drivers/md/dm-table.c
+++ 2.6.25-rc1/drivers/md/dm-table.c
@@ -813,6 +813,55 @@ static int setup_indexes(struct dm_table
return 0;
 }
 
+#define DM_HOOK_AT_REQUEST 0
+#define DM_HOOK_AT_BIO 1
+
+/*
+ * Check the consistency of targets' hook type
+ *
+ * Returns
+ *DM_HOOK_AT_REQUEST: the table is for request-based dm
+ *DM_HOOK_AT_BIO: the table is for bio-based dm
+ *negative  : the table is not consistent
+ */
+static int check_table_hook_type(struct dm_table *t)
+{
+   unsigned int i;
+   unsigned int bio_based = 0, rq_based = 0;
+   struct dm_target *ti;
+
+   for (i = 0; i  t-num_targets; i++) {
+   ti = t-targets + i;
+
+   if (ti-type-map_rq)
+   rq_based = 1;
+   else
+   bio_based = 1;
+
+   if (rq_based  bio_based) {
+   DMERR(Inconsistent table: different target types
+  mixed up);
+   return -EINVAL;
+   }
+   }
+
+   return rq_based ? DM_HOOK_AT_REQUEST : DM_HOOK_AT_BIO;
+}
+
+static int set_md_hook_type(struct dm_table *t)
+{
+   int r = check_table_hook_type(t);
+
+   switch (r) {
+   case DM_HOOK_AT_REQUEST:
+   return dm_set_md_request_based(t-md);
+   case DM_HOOK_AT_BIO:
+   return dm_set_md_bio_based(t-md);
+   default:
+   return r;
+   }
+}
+
 /*
  * Builds the btree to index the map.
  */
@@ -821,6 +870,11 @@ int dm_table_complete(struct dm_table *t
int r = 0;
unsigned int leaf_nodes;
 
+   /* Setup the mapped_device to bio-based dm or request-based dm */
+   r = set_md_hook_type(t);
+   if (r)
+   return r;
+
check_for_valid_limits(t-limits);
 
/* how many indexes will the btree have ? */
Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -95,6 +95,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_REQUEST_BASED 6
+#define DMF_BIO_BASED 7
 
 /*
  * Work processed by per-device workqueue.
@@ -1398,6 +1399,115 @@ out:
return r;
 }
 
+static void init_queue(struct request_queue *q, struct mapped_device *md)
+{
+   q-queuedata = md;
+   q-backing_dev_info.congested_fn = dm_any_congested;
+   q-backing_dev_info.congested_data = md;
+   blk_queue_make_request(q, dm_request);
+   blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+   q-unplug_fn = dm_unplug_all;
+}
+
+int dm_set_md_request_based(struct mapped_device *md)
+{
+   int r = 0;
+
+   if (test_bit(DMF_REQUEST_BASED, md-flags))
+   /* Initialization is already done */
+   return 0;
+
+   if (test_bit(DMF_BIO_BASED, md-flags)) {
+   DMERR(Can't change hook type to request-based from bio-based);
+   return -EINVAL;
+   }
+
+   md-tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+   if (!md-tio_pool)
+   return -ENOMEM;
+
+   md-queue = blk_init_queue(dm_request_fn, NULL);
+   if (!md-queue) {
+   DMERR(request queue initialization for request-based failed);
+   r = -ENOMEM;
+   goto out_free_tio_pool;
+   }
+
+   md-saved_make_request_fn = md-queue-make_request_fn;
+   init_queue(md-queue, md);
+   set_bit(QUEUE_FLAG_STACKABLE, md-queue-queue_flags);
+   md-disk-queue = md-queue;
+   r = blk_register_queue(md-disk);
+   if (r) {
+   DMERR(registration of request queue failed);
+   goto out_cleanup_queue;
+   }
+
+   set_bit(DMF_REQUEST_BASED, md-flags);
+
+   return 0;
+
+out_cleanup_queue:
+   blk_cleanup_queue(md-queue);
+   md-disk-queue = md-queue = NULL;
+   md-saved_make_request_fn = NULL

[RFC PATCH 3/3] block: lld busy status exporting interface

2008-02-15 Thread Kiyoshi Ueda
This patch adds an interface to check lld's busy status
from the block layer.
(scsi patch is also included just for example.)
This resolves a performance problem on request stacking devices below.


Some drivers like scsi mid layer stop dispatching requests when
they detect busy state on its low-level device like host/bus/device.
It allows the requests to stay in the I/O scheduler's queue
for a chance of merging.

Request stacking drivers like request-based dm should follow
the same logic.
However, there is no generic interface for the stacked device
to check if the underlying device(s) are busy.
If the stacking driver dispatches and submits requests to
the busy underlying device, the requests will stay in
the underlying device's queue without a chance for merging.
This causes performance problem on burst I/O load.

With this patch, busy state of the underlying device is exported
via the queue flag.  So the stacking driver can check it and
stop dispatching requests if busy.
The underlying device driver must set/clear the flag appropriately.

For example, scsi sets the busy flag when low-level devices are not
ready or when the low-level driver rejects dispatching a command.
And scsi clears the busy flag when a command has been dispatched
successfully, since there may be more room to dispatch (the exact
limit check will be done in the next loop iteration for the next request).

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/scsi/scsi_lib.c |   11 ++-
 include/linux/blkdev.h  |4 
 2 files changed, 14 insertions(+), 1 deletion(-)

Index: 2.6.25-rc1/include/linux/blkdev.h
===
--- 2.6.25-rc1.orig/include/linux/blkdev.h
+++ 2.6.25-rc1/include/linux/blkdev.h
@@ -430,6 +430,7 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH   8   /* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI9   /* queue supports bidi requests 
*/
 #define QUEUE_FLAG_STACKABLE   10  /* queue supports request stacking */
+#define QUEUE_FLAG_BUSY11  /* device/host under queue is 
busy */
 
 enum {
/*
@@ -477,6 +478,9 @@ enum {
 #define blk_queue_flushing(q)  ((q)-ordseq)
 #define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, (q)-queue_flags)
+#define blk_lld_busy(q)test_bit(QUEUE_FLAG_BUSY, 
(q)-queue_flags)
+#define blk_set_lld_busy(q)set_bit(QUEUE_FLAG_BUSY, (q)-queue_flags)
+#define blk_clear_lld_busy(q)  clear_bit(QUEUE_FLAG_BUSY, (q)-queue_flags)
 
 #define blk_fs_request(rq) ((rq)-cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq) ((rq)-cmd_type == REQ_TYPE_BLOCK_PC)
Index: 2.6.25-rc1/drivers/scsi/scsi_lib.c
===
--- 2.6.25-rc1.orig/drivers/scsi/scsi_lib.c
+++ 2.6.25-rc1/drivers/scsi/scsi_lib.c
@@ -1448,9 +1448,14 @@ static void scsi_request_fn(struct reque
 * accept it.
 */
req = elv_next_request(q);
-   if (!req || !scsi_dev_queue_ready(q, sdev))
+   if (!req)
break;
 
+   if (!scsi_dev_queue_ready(q, sdev)) {
+   blk_set_lld_busy(q);
+   break;
+   }
+
if (unlikely(!scsi_device_online(sdev))) {
sdev_printk(KERN_ERR, sdev,
rejecting I/O to offline device\n);
@@ -1506,6 +1511,8 @@ static void scsi_request_fn(struct reque
rtn = scsi_dispatch_cmd(cmd);
spin_lock_irq(q-queue_lock);
if(rtn) {
+   blk_set_lld_busy(q);
+
/* we're refusing the command; because of
 * the way locks get dropped, we need to 
 * check here if plugging is required */
@@ -1514,6 +1521,7 @@ static void scsi_request_fn(struct reque
 
break;
}
+   blk_clear_lld_busy(q);
}
 
goto out;
@@ -1530,6 +1538,7 @@ static void scsi_request_fn(struct reque
 * later time.
 */
spin_lock_irq(q-queue_lock);
+   blk_set_lld_busy(q);
blk_requeue_request(q, req);
sdev-device_busy--;
if(sdev-device_busy == 0)
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 07/13] dm: add memory pool

2008-02-15 Thread Kiyoshi Ueda
This patch prepares memory pools for request-based dm.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm.c |   26 +-
 1 files changed, 25 insertions(+), 1 deletion(-)

Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -52,6 +52,22 @@ struct dm_target_io {
union map_info info;
 };
 
+/*
+ * For request based dm.
+ * One of these is allocated per request.
+ *
+ * Since assuming original request : cloned request = 1 : 1 and
+ * a counter for number of clones like struct dm_io.io_count isn't needed,
+ * struct dm_io and struct target_io can merge.
+ */
+struct dm_rq_target_io {
+   struct mapped_device *md;
+   struct dm_target *ti;
+   struct request *orig, clone;
+   int error;
+   union map_info info;
+};
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
if (bio  bio-bi_private)
@@ -147,6 +163,7 @@ struct mapped_device {
 #define MIN_IOS 256
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
+static struct kmem_cache *_rq_tio_cache; /* tio pool for request-based dm */
 
 static int __init local_init(void)
 {
@@ -162,9 +179,13 @@ static int __init local_init(void)
if (!_tio_cache)
goto out_free_io_cache;
 
+   _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
+   if (!_rq_tio_cache)
+   goto out_free_tio_cache;
+
r = dm_uevent_init();
if (r)
-   goto out_free_tio_cache;
+   goto out_free_rq_tio_cache;
 
_major = major;
r = register_blkdev(_major, _name);
@@ -178,6 +199,8 @@ static int __init local_init(void)
 
 out_uevent_exit:
dm_uevent_exit();
+out_free_rq_tio_cache:
+   kmem_cache_destroy(_rq_tio_cache);
 out_free_tio_cache:
kmem_cache_destroy(_tio_cache);
 out_free_io_cache:
@@ -188,6 +211,7 @@ out_free_io_cache:
 
 static void local_exit(void)
 {
+   kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 04/13] block: export blk_end_io

2008-02-15 Thread Kiyoshi Ueda
This patch exports blk_end_io() so that request-based dm can use it
to complete their clone.

Request-based dm can't use the blk_end_request interfaces for its
clones, since its completion callback would be called again.
So another request completion interface, one with no stacking hook,
is needed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-core.c   |6 +++---
 include/linux/blkdev.h |3 +++
 2 files changed, 6 insertions(+), 3 deletions(-)

Index: 2.6.25-rc1/block/blk-core.c
===
--- 2.6.25-rc1.orig/block/blk-core.c
+++ 2.6.25-rc1/block/blk-core.c
@@ -1938,9 +1938,8 @@ EXPORT_SYMBOL(end_request);
  * 0 - we are done with this request
  * 1 - this request is not freed yet, it still has pending buffers.
  **/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
- unsigned int bidi_bytes,
- int (drv_callback)(struct request *))
+int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
+  unsigned int bidi_bytes, int (drv_callback)(struct request *))
 {
struct request_queue *q = rq-q;
unsigned long flags = 0UL;
@@ -1967,6 +1966,7 @@ static int blk_end_io(struct request *rq
 
return 0;
 }
+EXPORT_SYMBOL_GPL(blk_end_io);
 
 /**
  * blk_end_request - Helper function for drivers to complete the request.
Index: 2.6.25-rc1/include/linux/blkdev.h
===
--- 2.6.25-rc1.orig/include/linux/blkdev.h
+++ 2.6.25-rc1/include/linux/blkdev.h
@@ -701,6 +701,9 @@ extern int __blk_end_request(struct requ
unsigned int nr_bytes);
 extern int blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
+extern int blk_end_io(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes,
+ int (drv_callback)(struct request *));
 extern void blk_async_end_request(struct request *rq, int error);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 13/13] dm-mpath: convert to request-based

2008-02-15 Thread Kiyoshi Ueda
This patch converts dm-multipath target to request-based from bio-based.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm-mpath.c |  228 +-
 drivers/md/dm-rq-record.h |   36 +++
 2 files changed, 204 insertions(+), 60 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm-mpath.c
===
--- 2.6.25-rc1.orig/drivers/md/dm-mpath.c
+++ 2.6.25-rc1/drivers/md/dm-mpath.c
@@ -8,8 +8,7 @@
 #include dm.h
 #include dm-path-selector.h
 #include dm-hw-handler.h
-#include dm-bio-list.h
-#include dm-bio-record.h
+#include dm-rq-record.h
 #include dm-uevent.h
 
 #include linux/ctype.h
@@ -80,7 +79,7 @@ struct multipath {
unsigned pg_init_count; /* Number of times pg_init called */
 
struct work_struct process_queued_ios;
-   struct bio_list queued_ios;
+   struct list_head queued_ios;
unsigned queue_size;
 
struct work_struct trigger_event;
@@ -89,22 +88,22 @@ struct multipath {
 * We must use a mempool of dm_mpath_io structs so that we
 * can resubmit bios on error.
 */
-   mempool_t *mpio_pool;
+   mempool_t *mpio_pool; /* REMOVE ME */
 };
 
 /*
  * Context information attached to each bio we process.
  */
-struct dm_mpath_io {
+struct dm_mpath_io { /* REMOVE ME */
struct pgpath *pgpath;
-   struct dm_bio_details details;
+   struct dm_rq_details details;
 };
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
 #define MIN_IOS 256/* Mempool size */
 
-static struct kmem_cache *_mpio_cache;
+static struct kmem_cache *_mpio_cache; /* REMOVE ME */
 
 static struct workqueue_struct *kmultipathd;
 static void process_queued_ios(struct work_struct *work);
@@ -174,6 +173,7 @@ static struct multipath *alloc_multipath
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(m-priority_groups);
+   INIT_LIST_HEAD(m-queued_ios);
spin_lock_init(m-lock);
m-queue_io = 1;
INIT_WORK(m-process_queued_ios, process_queued_ios);
@@ -304,12 +304,13 @@ static int __must_push_back(struct multi
dm_noflush_suspending(m-ti));
 }
 
-static int map_io(struct multipath *m, struct bio *bio,
+static int map_io(struct multipath *m, struct request *clone,
  struct dm_mpath_io *mpio, unsigned was_queued)
 {
int r = DM_MAPIO_REMAPPED;
unsigned long flags;
struct pgpath *pgpath;
+   struct block_device *bdev;
 
spin_lock_irqsave(m-lock, flags);
 
@@ -326,19 +327,28 @@ static int map_io(struct multipath *m, s
if ((pgpath  m-queue_io) ||
(!pgpath  m-queue_if_no_path)) {
/* Queue for the daemon to resubmit */
-   bio_list_add(m-queued_ios, bio);
+   list_add_tail(clone-queuelist, m-queued_ios);
m-queue_size++;
if ((m-pg_init_required  !m-pg_init_in_progress) ||
!m-queue_io)
queue_work(kmultipathd, m-process_queued_ios);
pgpath = NULL;
+   clone-q = NULL;
+   clone-rq_disk = NULL;
r = DM_MAPIO_SUBMITTED;
-   } else if (pgpath)
-   bio-bi_bdev = pgpath-path.dev-bdev;
-   else if (__must_push_back(m))
+   } else if (pgpath) {
+   bdev = pgpath-path.dev-bdev;
+   clone-q = bdev_get_queue(bdev);
+   clone-rq_disk = bdev-bd_disk;
+   } else if (__must_push_back(m)) {
+   clone-q = NULL;
+   clone-rq_disk = NULL;
r = DM_MAPIO_REQUEUE;
-   else
+   } else {
+   clone-q = NULL;
+   clone-rq_disk = NULL;
r = -EIO;   /* Failed */
+   }
 
mpio-pgpath = pgpath;
 
@@ -378,30 +388,28 @@ static void dispatch_queued_ios(struct m
 {
int r;
unsigned long flags;
-   struct bio *bio = NULL, *next;
struct dm_mpath_io *mpio;
union map_info *info;
+   struct request *clone, *n;
+   LIST_HEAD(cl);
 
spin_lock_irqsave(m-lock, flags);
-   bio = bio_list_get(m-queued_ios);
+   list_splice_init(m-queued_ios, cl);
spin_unlock_irqrestore(m-lock, flags);
 
-   while (bio) {
-   next = bio-bi_next;
-   bio-bi_next = NULL;
+   list_for_each_entry_safe(clone, n, cl, queuelist) {
+   list_del_init(clone-queuelist);
 
-   info = dm_get_mapinfo(bio);
+   info = dm_get_rq_mapinfo(clone);
mpio = info-ptr;
 
-   r = map_io(m, bio, mpio, 1);
+   r = map_io(m, clone, mpio, 1);
if (r  0)
-   bio_endio(bio, r);
+   blk_end_request(clone, r, blk_rq_bytes(clone));
else if (r

[APPENDIX PATCH 09/13] dm: add core functions

2008-02-15 Thread Kiyoshi Ueda
This patch adds core functions for request-based dm.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm.c |  452 +++-
 drivers/md/dm.h |7 
 2 files changed, 456 insertions(+), 3 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -75,6 +75,14 @@ union map_info *dm_get_mapinfo(struct bi
return NULL;
 }
 
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+   if (rq  rq-end_io_data)
+   return ((struct dm_rq_target_io *)rq-end_io_data)-info;
+   return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
+
 #define MINOR_ALLOCED ((void *)-1)
 
 /*
@@ -86,6 +94,7 @@ union map_info *dm_get_mapinfo(struct bi
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_REQUEST_BASED 6
 
 /*
  * Work processed by per-device workqueue.
@@ -158,6 +167,9 @@ struct mapped_device {
 
/* forced geometry settings */
struct hd_geometry geometry;
+
+   /* For saving the address of __make_request for request based dm */
+   make_request_fn *saved_make_request_fn;
 };
 
 #define MIN_IOS 256
@@ -395,6 +407,17 @@ static void free_tio(struct mapped_devic
mempool_free(tio, md-tio_pool);
 }
 
+static inline struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
+{
+   return mempool_alloc(md-tio_pool, GFP_ATOMIC);
+}
+
+static inline void free_rq_tio(struct mapped_device *md,
+  struct dm_rq_target_io *tio)
+{
+   mempool_free(tio, md-tio_pool);
+}
+
 static void start_io_acct(struct dm_io *io)
 {
struct mapped_device *md = io-md;
@@ -583,6 +606,181 @@ static void clone_endio(struct bio *bio,
free_tio(md, tio);
 }
 
+static void __requeue_request(struct request_queue *q, struct request *rq)
+{
+   if (elv_queue_empty(q))
+   blk_plug_device(q);
+   blk_requeue_request(q, rq);
+}
+
+static void requeue_request(struct request_queue *q, struct request *rq)
+{
+   unsigned long flags = 0UL;
+
+   spin_lock_irqsave(q-queue_lock, flags);
+   __requeue_request(q, rq);
+   spin_unlock_irqrestore(q-queue_lock, flags);
+}
+
+static void dec_rq_pending(struct dm_rq_target_io *tio)
+{
+   if (!atomic_dec_return(tio-md-pending))
+   /* nudge anyone waiting on suspend queue */
+   wake_up(tio-md-wait);
+}
+
+static void blk_update_cloned_rq(struct request *rq, struct request *clone)
+{
+   clone-nr_phys_segments = rq-nr_phys_segments;
+   clone-nr_hw_segments = rq-nr_hw_segments;
+   clone-current_nr_sectors = rq-current_nr_sectors;
+   clone-hard_cur_sectors = rq-hard_cur_sectors;
+   clone-hard_nr_sectors = rq-hard_nr_sectors;
+   clone-nr_sectors = rq-nr_sectors;
+   clone-hard_sector = rq-hard_sector;
+   clone-sector = rq-sector;
+   clone-data_len = rq-data_len;
+   clone-buffer = rq-buffer;
+   clone-data = rq-data;
+   clone-bio = rq-bio;
+   clone-biotail = rq-biotail;
+}
+
+static void finish_clone(struct request *clone)
+{
+   if (!clone-q)
+   /*
+* The clone was not dispatched into underlying devices and
+* it means the caller is not underlying device driver,
+* the caller should be dm. (e.g. dispatch_queued_ios() of
+* dm-multipath)
+* So no need to do anything here for this clone.
+*/
+   return;
+
+   /*
+* For just cleaning up the information of the queue in which
+* the clone was dispatched.
+* The clone is *NOT* freed actually here because it is alloced from
+* dm own mempool and REQ_ALLOCED isn't set in clone-cmd_flags.
+*
+* The 'error' and 'nr_bytes' arguments of blk_end_io() don't matter
+* because they aren't used for dm's clones.
+*/
+   if (blk_end_io(clone, 0, 0, 0, NULL))
+   DMWARN(dm ignores the immediate return request of callback.);
+}
+
+static void clean_clone(struct request *clone)
+{
+   finish_clone(clone);
+   clone-special = NULL;
+   clone-errors = 0;
+   clone-endio_error = 0;
+}
+
+/**
+ * Must be called without the queue lock
+ **/
+static int clone_end_request(struct request *clone, int error,
+unsigned int nr_bytes, unsigned int bidi_bytes,
+int (drv_callback)(struct request *))
+{
+   int r = 0, rw = rq_data_dir(clone), requeued = 0;
+   struct dm_rq_target_io *tio = clone-end_io_data;
+   dm_request_endio_first_fn endio_first = tio-ti-type-rq_end_io_first;
+   dm_request_endio_fn endio = tio-ti-type-rq_end_io;
+   dm_request_queue_in_tgt_fn queue_in_tgt = tio-ti-type-queue_in_tgt

[APPENDIX PATCH 08/13] dm: add target interfaces

2008-02-15 Thread Kiyoshi Ueda
This patch adds target interfaces for request-based dm.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 include/linux/device-mapper.h |   23 +++
 1 files changed, 23 insertions(+)

Index: 2.6.25-rc1/include/linux/device-mapper.h
===
--- 2.6.25-rc1.orig/include/linux/device-mapper.h
+++ 2.6.25-rc1/include/linux/device-mapper.h
@@ -10,6 +10,8 @@
 
 #ifdef __KERNEL__
 
+struct request;
+struct request_queue;
 struct dm_target;
 struct dm_table;
 struct dm_dev;
@@ -45,6 +47,9 @@ typedef void (*dm_dtr_fn) (struct dm_tar
 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
  union map_info *map_context);
 
+typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
+ union map_info *map_context);
+
 /*
  * Returns:
  *  0 : error (currently ignored)
@@ -57,6 +62,18 @@ typedef int (*dm_endio_fn) (struct dm_ta
struct bio *bio, int error,
union map_info *map_context);
 
+typedef int (*dm_request_endio_first_fn) (struct dm_target *ti,
+ struct request *clone, int error,
+ union map_info *map_context);
+
+typedef int (*dm_request_endio_fn) (struct dm_target *ti,
+   struct request *clone, int error,
+   union map_info *map_context);
+
+typedef void (*dm_request_queue_in_tgt_fn) (struct dm_target *ti,
+   struct request *clone,
+   union map_info *map_context);
+
 typedef void (*dm_flush_fn) (struct dm_target *ti);
 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
 typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
@@ -71,6 +88,7 @@ typedef int (*dm_message_fn) (struct dm_
 typedef int (*dm_ioctl_fn) (struct dm_target *ti, struct inode *inode,
struct file *filp, unsigned int cmd,
unsigned long arg);
+typedef int (*dm_congested_fn) (struct dm_target *ti);
 
 void dm_error(const char *message);
 
@@ -98,7 +116,11 @@ struct target_type {
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
+   dm_map_request_fn map_rq;
dm_endio_fn end_io;
+   dm_request_endio_first_fn rq_end_io_first;
+   dm_request_endio_fn rq_end_io;
+   dm_request_queue_in_tgt_fn queue_in_tgt;
dm_flush_fn flush;
dm_presuspend_fn presuspend;
dm_postsuspend_fn postsuspend;
@@ -107,6 +129,7 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_ioctl_fn ioctl;
+   dm_congested_fn congested;
 };
 
 struct io_restrictions {
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 2/3] block: move internal request completion to kblockd

2008-02-15 Thread Kiyoshi Ueda
This patch eliminates the use of __blk_end_request() and
end_queued_request() in the block layer.

On the current request stacking design, drivers are not ready
for request stacking if they hold the queue lock when completing
request.
However, some block layer functions are doing that, and the block
layer is not ready for request stacking now.

To complete all requests without the queue lock, this patch uses
kblockd in such cases.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-barrier.c|8 ++--
 block/blk-core.c   |   49 +
 block/blk-settings.c   |2 ++
 block/blk.h|2 ++
 block/elevator.c   |4 ++--
 include/linux/blkdev.h |   25 +
 6 files changed, 82 insertions(+), 8 deletions(-)

Index: 2.6.25-rc1/include/linux/blkdev.h
===
--- 2.6.25-rc1.orig/include/linux/blkdev.h
+++ 2.6.25-rc1/include/linux/blkdev.h
@@ -236,6 +236,23 @@ struct request {
 
/* for bidi */
struct request *next_rq;
+
+   /*
+* For calling the request completion interface without the queue lock
+* using workqueue.
+*
+* The work handler needs to know the error code and the completion
+* size of the request to complete it.
+* We don't need to pass the completion size to the work handler,
+* because the workqueue completion method doesn't allow partial
+* completion and the work handler can use the whole size of
+* the request.
+* For the error code, we need to pass it to the work handler because
+* no member in the struct request can be used for the purpose.
+*   (->errors should not be used, because the upper layer may expect
+*  that driver-specific error codes is there.)
+*/
+   int endio_error;
 };
 
 /*
@@ -393,6 +410,13 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
 #endif
+
+   /*
+* For request completion without queue lock.
+* The workqueue completion method doesn't allow partial completion.
+*/
+   struct work_struct  endio_work;
+   struct list_headendio_list;
 };
 
 #define QUEUE_FLAG_CLUSTER 0   /* cluster several segments into 1 */
@@ -669,6 +693,7 @@ extern int __blk_end_request(struct requ
unsigned int nr_bytes);
 extern int blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
+extern void blk_async_end_request(struct request *rq, int error);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request(struct request *, int);
Index: 2.6.25-rc1/block/elevator.c
===
--- 2.6.25-rc1.orig/block/elevator.c
+++ 2.6.25-rc1/block/elevator.c
@@ -721,7 +721,7 @@ struct request *elv_next_request(struct 
 * not ever see it.
 */
if (blk_empty_barrier(rq)) {
-   end_queued_request(rq, 1);
+   blk_async_end_request(rq, 0);
continue;
}
if (!(rq-cmd_flags  REQ_STARTED)) {
@@ -788,7 +788,7 @@ struct request *elv_next_request(struct 
break;
} else if (ret == BLKPREP_KILL) {
rq-cmd_flags |= REQ_QUIET;
-   end_queued_request(rq, 0);
+   blk_async_end_request(rq, -EIO);
} else {
printk(KERN_ERR %s: bad return=%d\n, __FUNCTION__,
ret);
Index: 2.6.25-rc1/block/blk-barrier.c
===
--- 2.6.25-rc1.orig/block/blk-barrier.c
+++ 2.6.25-rc1/block/blk-barrier.c
@@ -108,8 +108,7 @@ void blk_ordered_complete_seq(struct req
q-ordseq = 0;
rq = q-orig_bar_rq;
 
-   if (__blk_end_request(rq, q-orderr, blk_rq_bytes(rq)))
-   BUG();
+   blk_async_end_request(rq, q-orderr);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -225,10 +224,7 @@ int blk_do_ordered(struct request_queue 
 * This can happen when the queue switches to
 * ORDERED_NONE while this request is on it.
 */
-   blkdev_dequeue_request(rq);
-   if (__blk_end_request(rq, -EOPNOTSUPP,
- blk_rq_bytes(rq)))
-   BUG();
+   blk_async_end_request(rq, -EOPNOTSUPP);
*rqp = NULL

[APPENDIX PATCH 03/13] block: export blk_register_queue

2008-02-15 Thread Kiyoshi Ueda
This patch exports blk_register_queue().

Request-based dm and bio-based dm will coexist, since there are
some target drivers which are more fitting to bio-based dm.
dm decides the hook type for a dm device and initializes the queue,
when a table is loaded to the dm device, not device creation time.
Then, request-based dm sets q-request_fn and wants to register
the queue correctly again.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-sysfs.c |1 +
 1 files changed, 1 insertion(+)

Index: 2.6.25-rc1/block/blk-sysfs.c
===
--- 2.6.25-rc1.orig/block/blk-sysfs.c
+++ 2.6.25-rc1/block/blk-sysfs.c
@@ -295,6 +295,7 @@ int blk_register_queue(struct gendisk *d
 
return 0;
 }
+EXPORT_SYMBOL_GPL(blk_register_queue);
 
 void blk_unregister_queue(struct gendisk *disk)
 {
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 12/13] dm-mpath: add hw-handler interface

2008-02-15 Thread Kiyoshi Ueda
This patch adds a hw-handler interface for request-based dm-multipath.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm-hw-handler.h |1 +
 1 files changed, 1 insertion(+)

Index: 2.6.25-rc1/drivers/md/dm-hw-handler.h
===
--- 2.6.25-rc1.orig/drivers/md/dm-hw-handler.h
+++ 2.6.25-rc1/drivers/md/dm-hw-handler.h
@@ -35,6 +35,7 @@ struct hw_handler_type {
void (*pg_init) (struct hw_handler *hwh, unsigned bypassed,
 struct dm_path *path);
unsigned (*error) (struct hw_handler *hwh, struct bio *bio);
+   unsigned (*error_rq) (struct hw_handler *hwh, struct request *rq);
int (*status) (struct hw_handler *hwh, status_type_t type,
   char *result, unsigned int maxlen);
 };
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 06/13] dm: tidy local_init

2008-02-15 Thread Kiyoshi Ueda
This patch tidies local_init() as preparation for request-based dm.
No functional change.

This patch is just a clean up of the codes and not functionally
related to request-based dm.  But included here due to literal
dependency.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm.c |   34 +-
 1 files changed, 17 insertions(+), 17 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -150,40 +150,40 @@ static struct kmem_cache *_tio_cache;
 
 static int __init local_init(void)
 {
-   int r;
+   int r = -ENOMEM;
 
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
-   return -ENOMEM;
+   return r;
 
/* allocate a slab for the target ios */
_tio_cache = KMEM_CACHE(dm_target_io, 0);
-   if (!_tio_cache) {
-   kmem_cache_destroy(_io_cache);
-   return -ENOMEM;
-   }
+   if (!_tio_cache)
+   goto out_free_io_cache;
 
r = dm_uevent_init();
-   if (r) {
-   kmem_cache_destroy(_tio_cache);
-   kmem_cache_destroy(_io_cache);
-   return r;
-   }
+   if (r)
+   goto out_free_tio_cache;
 
_major = major;
r = register_blkdev(_major, _name);
-   if (r  0) {
-   kmem_cache_destroy(_tio_cache);
-   kmem_cache_destroy(_io_cache);
-   dm_uevent_exit();
-   return r;
-   }
+   if (r  0)
+   goto out_uevent_exit;
 
if (!_major)
_major = r;
 
return 0;
+
+out_uevent_exit:
+   dm_uevent_exit();
+out_free_tio_cache:
+   kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+   kmem_cache_destroy(_io_cache);
+
+   return r;
 }
 
 static void local_exit(void)
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[APPENDIX PATCH 11/13] dm: reject bad table load

2008-02-15 Thread Kiyoshi Ueda
This patch rejects bad table load for request-based dm.

The following table loadings are rejected:
  - including non-stackable device
  - shrinking the current restrictions

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/md/dm-table.c |   48 --
 drivers/md/dm.c   |   25 +
 include/linux/device-mapper.h |9 +++
 3 files changed, 80 insertions(+), 2 deletions(-)

Index: 2.6.25-rc1/drivers/md/dm-table.c
===
--- 2.6.25-rc1.orig/drivers/md/dm-table.c
+++ 2.6.25-rc1/drivers/md/dm-table.c
@@ -108,6 +108,8 @@ static void combine_restrictions_low(str
lhs-bounce_pfn = min_not_zero(lhs-bounce_pfn, rhs-bounce_pfn);
 
lhs-no_cluster |= rhs-no_cluster;
+
+   lhs-no_stack |= rhs-no_stack;
 }
 
 /*
@@ -578,6 +580,8 @@ void dm_set_device_limits(struct dm_targ
rs-bounce_pfn = min_not_zero(rs-bounce_pfn, q-bounce_pfn);
 
rs-no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, q-queue_flags);
+
+   rs-no_stack |= !blk_queue_stackable(q);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
@@ -704,8 +708,13 @@ int dm_split_args(int *argc, char ***arg
return 0;
 }
 
-static void check_for_valid_limits(struct io_restrictions *rs)
+static int check_for_valid_limits(struct io_restrictions *rs,
+ struct mapped_device *md)
 {
+   int r = 0;
+   struct request_queue *q;
+
+   /* Set maximum value if no restriction */
if (!rs-max_sectors)
rs-max_sectors = SAFE_MAX_SECTORS;
if (!rs-max_hw_sectors)
@@ -722,6 +731,39 @@ static void check_for_valid_limits(struc
rs-seg_boundary_mask = -1;
if (!rs-bounce_pfn)
rs-bounce_pfn = -1;
+
+   /* Request-based dm allows to load only request stackable tables */
+   if (dm_request_based(md)  rs-no_stack) {
+   DMERR(table load rejected: including non-stackable devices);
+   return -EINVAL;
+   }
+
+   /* First table loading must be allowed */
+   if (!dm_request_based(md) || !dm_bound_table(md))
+   return 0;
+
+   q  = dm_get_queue(md);
+   if (!q) {
+   DMERR(can't get queue from the mapped device);
+   return -EINVAL;
+   }
+
+   if ((rs-max_sectors  q-max_sectors) ||
+   (rs-max_hw_sectors  q-max_hw_sectors) ||
+   (rs-max_phys_segments  q-max_phys_segments) ||
+   (rs-max_hw_segments  q-max_hw_segments) ||
+   (rs-hardsect_size  q-hardsect_size) ||
+   (rs-max_segment_size  q-max_segment_size) ||
+   (rs-seg_boundary_mask  q-seg_boundary_mask) ||
+   (rs-bounce_pfn  q-bounce_pfn) ||
+   (rs-no_cluster  test_bit(QUEUE_FLAG_CLUSTER, q-queue_flags))) {
+   DMERR(table load rejected: shrinking current restriction);
+   r = -EINVAL;
+   }
+
+   dm_put_queue(q);
+
+   return r;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
@@ -875,7 +917,9 @@ int dm_table_complete(struct dm_table *t
if (r)
return r;
 
-   check_for_valid_limits(t-limits);
+   r = check_for_valid_limits(t-limits, t-md);
+   if (r)
+   return r;
 
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t-num_targets, KEYS_PER_NODE);
Index: 2.6.25-rc1/drivers/md/dm.c
===
--- 2.6.25-rc1.orig/drivers/md/dm.c
+++ 2.6.25-rc1/drivers/md/dm.c
@@ -96,6 +96,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_NOFLUSH_SUSPENDING 5
 #define DMF_REQUEST_BASED 6
 #define DMF_BIO_BASED 7
+#define DMF_BOUND_TABLE 8
 
 /*
  * Work processed by per-device workqueue.
@@ -1672,6 +1673,7 @@ static int __bind(struct mapped_device *
write_lock(md-map_lock);
md-map = t;
dm_table_set_restrictions(t, q);
+   set_bit(DMF_BOUND_TABLE, md-flags);
write_unlock(md-map_lock);
 
return 0;
@@ -1912,6 +1914,19 @@ static void start_queue(struct request_q
spin_unlock_irqrestore(q-queue_lock, flags);
 }
 
+struct request_queue *dm_get_queue(struct mapped_device *md)
+{
+   if (blk_get_queue(md-queue))
+   return NULL;
+
+   return md-queue;
+}
+
+void dm_put_queue(struct request_queue *q)
+{
+   blk_put_queue(q);
+}
+
 /*
  * Functions to lock and unlock any filesystem running on the
  * device.
@@ -2174,6 +2189,16 @@ int dm_suspended(struct mapped_device *m
return test_bit(DMF_SUSPENDED, md-flags);
 }
 
+int dm_request_based(struct mapped_device *md)
+{
+   return test_bit(DMF_REQUEST_BASED, md-flags);
+}
+
+int dm_bound_table(struct mapped_device *md)
+{
+   return test_bit(DMF_BOUND_TABLE, md-flags);
+}
+
 int dm_noflush_suspending(struct dm_target *ti

[RFC PATCH 1/3] block: add rq-complete_io hook for request stacking

2008-02-15 Thread Kiyoshi Ueda
This patch adds -complete_io() hook for request stacking.
Request stacking drivers (such as request-based dm) can set
a callback for completion.
(The hook is not called in blk_end_io(), since request-based dm uses
 it for clone completion in the following appendix patches.)

For the stacking to work without deadlock between stacking devices,
both the submission function and the completion function must be
called without the queue lock.
So the stacking is not available for __blk_end_request(), which
is called with the queue lock held.
The patch adds a queue flag (QUEUE_FLAG_STACKABLE) and a check to
make sure that the stacking is not enabled in drivers calling
__blk_end_request().

It means that only scsi mid-layer, cciss and i2o are ready for
the stacking.
I believe current dm-multipath users are using scsi, cciss and dasd.
So at least, dasd needs to be changed not to use __blk_end_request()
for request-based dm-multipath.



Below is the detailed explanation why the stacking is not possible
for drivers using __blk_end_request().

If the completion function is called with the queue lock held,
we have 2 deadlock problems:
  a). when the stacking driver completes the hooked request
  b). when the stacking driver submits other requests in that
  completion context

  Suppose we have the following stack:
 
||
| stacking device: DEV#2 |
||
 
||
| real device: DEV#1 |
||

  For example of a):
1. The device driver takes the queue lock for DEV#1
2. The device driver calls __blk_end_request() for the request
   and it is hooked by the stacking driver
3. The stacking driver tries to take the queue lock for DEV#1
   to complete the hooked request
=> deadlock happens on the queue lock for DEV#1

  For example of b): Assume the a) is worked-around by something
1. The device driver takes the queue lock for DEV#1
2. The device driver calls __blk_end_request() for the request
   and it is hooked by the stacking driver
3. The stacking driver completes the hooked request
4. The stacking driver dequeues the next request from DEV#2
   to dispatch quickly due to some performance reasons
5. The stacking driver tries to take the queue lock for DEV#1
   to submit the next request
=> deadlock happens on the queue lock for DEV#1

To prevent such deadlock problems on the stacking, I'd like to say
that drivers which hold the queue lock when completing request are
not ready for the stacking.
So drivers using __blk_end_request() (and end_[queued|dequeued_]request())
need to be modified in this request stacking design, if we need to
use such devices for the stacking.

To prevent request stacking drivers from using such unstackable
devices, a queue flag, QUEUE_FLAG_STACKABLE, is added.
And device drivers can set it to be used by request stacking drivers.
(scsi patch is included just for an example of the setting.)
To detect wrong use of the flag and the hook, some checks are also
put in __blk_end_request().


Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/blk-core.c|   31 +++
 drivers/scsi/scsi_lib.c |7 +++
 include/linux/blkdev.h  |7 +++
 3 files changed, 45 insertions(+)

Index: 2.6.25-rc1/block/blk-core.c
===
--- 2.6.25-rc1.orig/block/blk-core.c
+++ 2.6.25-rc1/block/blk-core.c
@@ -138,6 +138,7 @@ void rq_init(struct request_queue *q, st
rq-data = NULL;
rq-sense = NULL;
rq-end_io = NULL;
+   rq-complete_io = NULL;
rq-end_io_data = NULL;
rq-next_rq = NULL;
 }
@@ -1917,6 +1918,9 @@ static int blk_end_io(struct request *rq
  **/
 int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
+   if (rq-complete_io)
+   return rq-complete_io(rq, error, nr_bytes, 0, NULL);
+
return blk_end_io(rq, error, nr_bytes, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(blk_end_request);
@@ -1936,6 +1940,27 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
+   if (unlikely(blk_queue_stackable(rq-q)))
+   printk(KERN_WARNING dev: %s: Not ready for request stacking, 
+  but the device driver set it stackable. 
+  Need to fix the device driver!\n,
+  rq-rq_disk ? rq-rq_disk-disk_name : ?);
+
+   if (unlikely(rq-complete_io)) {
+   /*
+* If we invoke the -complete_io here, the request submitter's
+* handler would get deadlock on the queue lock.
+*
+* This happens, when the request submitter didn't check
+* whether the queue is stackable

Re: [PATCH 20/30] blk_end_request: changing xsysace (take 4)

2007-12-12 Thread Kiyoshi Ueda
Hi,

On Wed, 12 Dec 2007 11:09:12 +0200, Boaz Harrosh [EMAIL PROTECTED] wrote:
  Index: 2.6.24-rc4/drivers/block/xsysace.c
  ===
  --- 2.6.24-rc4.orig/drivers/block/xsysace.c
  +++ 2.6.24-rc4/drivers/block/xsysace.c
  @@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_d
   
  /* bio finished; is there another one? */
  i = ace-req-current_nr_sectors;
  -   if (end_that_request_first(ace-req, 1, i)) {
  +   if (__blk_end_request(ace-req, 0, i)) {

 end_that_request_first() took sectors __blk_end_request() now takes
 bytes

Thank you for pointing it out!  And I'm very sorry for the bug.
I have checked all conversions between sectors and bytes through
all patches again, and I found no other miss conversions.

Below is the revised patch for xsysace.

Thanks,
Kiyoshi Ueda


Subject: [PATCH 20/30] blk_end_request: changing xsysace (take 4)

This patch converts xsysace to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

xsysace is a little bit different from normal drivers.
xsysace driver has a state machine in it.
It calls end_that_request_first() and end_that_request_last()
from different states. (ACE_FSM_STATE_REQ_TRANSFER and
ACE_FSM_STATE_REQ_COMPLETE, respectively.)

However, those states are consecutive and without any interruption
inbetween.
So we can just follow the standard conversion rule (b) mentioned in
the patch subject [PATCH 01/30] blk_end_request: add new request
completion interface.


In ace_fsm_dostate(), the variable 'i' was used only for passing
sector size of the request to end_that_request_first().
So I removed it and changed the code to pass the size in bytes
directly to __blk_end_request().

Cc: Grant Likely [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/xsysace.c |9 ++---
 1 files changed, 2 insertions(+), 7 deletions(-)

Index: 2.6.24-rc4/drivers/block/xsysace.c
===
--- 2.6.24-rc4.orig/drivers/block/xsysace.c
+++ 2.6.24-rc4/drivers/block/xsysace.c
@@ -483,7 +483,6 @@ static void ace_fsm_dostate(struct ace_d
u32 status;
u16 val;
int count;
-   int i;
 
 #if defined(DEBUG)
dev_dbg(ace-dev, fsm_state=%i, id_req_count=%i\n,
@@ -688,7 +687,6 @@ static void ace_fsm_dostate(struct ace_d
}
 
/* Transfer the next buffer */
-   i = 16;
if (ace-fsm_task == ACE_TASK_WRITE)
ace-reg_ops-dataout(ace);
else
@@ -702,8 +700,8 @@ static void ace_fsm_dostate(struct ace_d
}
 
/* bio finished; is there another one? */
-   i = ace-req-current_nr_sectors;
-   if (end_that_request_first(ace-req, 1, i)) {
+   if (__blk_end_request(ace-req, 0,
+ blk_rq_cur_bytes(ace-req))) {
/* dev_dbg(ace-dev, next block; h=%li c=%i\n,
 *  ace-req-hard_nr_sectors,
 *  ace-req-current_nr_sectors);
@@ -718,9 +716,6 @@ static void ace_fsm_dostate(struct ace_d
break;
 
case ACE_FSM_STATE_REQ_COMPLETE:
-   /* Complete the block request */
-   blkdev_dequeue_request(ace-req);
-   end_that_request_last(ace-req, 1);
ace-req = NULL;
 
/* Finished request; go to idle state */
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 21/30] blk_end_request: changing cciss (take 4)

2007-12-12 Thread Kiyoshi Ueda
Hi Mike,

On Wed, 12 Dec 2007 15:25:10 +, Miller, Mike (OS Dev) [EMAIL PROTECTED] 
wrote:
  Index: 2.6.24-rc4/drivers/block/cciss.c
  ===
  --- 2.6.24-rc4.orig/drivers/block/cciss.c
  +++ 2.6.24-rc4/drivers/block/cciss.c
snip
  +2526,6 @@ after_error_processing:
  }
  cmd-rq-data_len = 0;
  cmd-rq-completion_data = cmd;
  -   blk_add_trace_rq(cmd-rq-q, cmd-rq, BLK_TA_COMPLETE);
 
 Why is this removed?

Sorry for the less explanation.

Because it is done in __end_that_request_first() called from
blk_end_request().
I'll add the explanation to the patch description when I update
the patch.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 12/30] blk_end_request: changing ub (take 4)

2007-12-12 Thread Kiyoshi Ueda
Hi Pete,

On Tue, 11 Dec 2007 15:48:03 -0800, Pete Zaitcev [EMAIL PROTECTED] wrote:
  if (scsi_status == 0) {
  -   uptodate = 1;
  +   error = 0;
  } else {
  -   uptodate = 0;
  +   error = -EIO;
  rq-errors = scsi_status;
  }
  -   end_that_request_first(rq, uptodate, rq-hard_nr_sectors);
  -   end_that_request_last(rq, uptodate);
  +   if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
  +   BUG();
 
 Acked-by: Pete Zaitcev [EMAIL PROTECTED]
 
 I follow the discussion, actually, and wanted to ask someone to look
 closer if it's appropriate to use __blk_end_request() here.
 My understanding was, blk_end_request() is the same thing, only
 takes the queue lock. But then, should I refactor ub so that it
 calls __blk_end_request if request function ends with an error
 and blk_end_request if the end-of-IO even is processed? If not,
 and the above is sufficient, why have blk_end_request at all?

The difference between blk_end_request() and __blk_end_request() is
whether the queue lock is held or not when end_that_request_last()
is called.
It's not relevant to the status of the request (error or not).

I'm using __blk_end_request() here and I think it's sufficient, because:
  o end_that_request_last() must be called with the queue lock held
  o ub_end_rq() calls end_that_request_last() without taking
the queue lock in itself.
So the queue lock must have been taken outside ub_end_rq().

But, if ub is calling end_that_request_last() without the queue lock,
it is a bug in the original code and we should use blk_end_request()
to fix that.

Does that answer satisfy you?

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 01/30] blk_end_request: add new request completion interface (take 4)

2007-12-12 Thread Kiyoshi Ueda
Hi James, Jens,

On Wed, 12 Dec 2007 07:53:36 -0500, James Bottomley wrote:
 On Tue, 2007-12-11 at 17:40 -0500, Kiyoshi Ueda wrote:
  This patch adds 2 new interfaces for request completion:
o blk_end_request()   : called without queue lock
o __blk_end_request() : called with queue lock held
  
  blk_end_request takes 'error' as an argument instead of 'uptodate',
  which current end_that_request_* take.
  The meanings of values are below and the value is used when bio is
  completed.
  0 : success
  < 0 : error
  
  Some device drivers call some generic functions below between
  end_that_request_{first/chunk} and end_that_request_last().
o add_disk_randomness()
o blk_queue_end_tag()
o blkdev_dequeue_request()
 
 If we can roll the whole thing together, that would be nice.  However,
 the way you're doing it with this patch, we now have an asymmetrical
 interface:  The request routine must explicitly start the tag, but now
 doesn't have to end it.
 
 We really need symmetry.  Either go back to start tag/end tag, or absorb
 the whole lot into the block infrastructure.
 
 The original reason for the explicit start/end is that there are some
 requests on a tagged device that aren't able to be tagged by the block
 layer (some devices reserve tag numbers for specific meanings).
 However, I don't think there's any driver that actually implemented this
 feature.

As far as I investigated in 2.6.24-rc5, only scsi uses the blk_queue_tag
and no files in drivers/scsi reserving tag_index in Scsi_Host-bqt.
So I would like to take the absorbing start tag in the block layer way.

The patch below is on top of the blk_end_request patch-set.
Is it acceptable?

Thanks,
Kiyoshi Ueda



Subject: [PATCH 31/31] blk_end_request: merge start_tag to block layer


This patch merges blk_queue_start_tag() into blkdev_dequeue_request().

blk_queue_start_tag() and blk_queue_end_tag() are a pair of
interfaces for starting/ending request tagging.
Since with the new blk_end_request interfaces, blk_queue_end_tag()
is done implicitly in the block layer, blk_queue_start_tag() should
be done in the block layer, too, for keeping the interface symmetric.

Originally, the start/end tag was not done by the block layer so that
drivers can choose tag numbers for their specific meanings.
But no driver uses the feature.
Scsi is the only user of the block layer tagging and it uses
the generic blk_queue_start/end_tag.
So moving the start/end tag to the block layer is not an issue now.


Cc: James Bottomley [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c   |2 +-
 drivers/scsi/scsi_lib.c |3 +--
 include/linux/blkdev.h  |   11 ++-
 3 files changed, 8 insertions(+), 8 deletions(-)

Index: 2.6.24-rc5/block/ll_rw_blk.c
===
--- 2.6.24-rc5.orig/block/ll_rw_blk.c
+++ 2.6.24-rc5/block/ll_rw_blk.c
@@ -1115,7 +1115,7 @@ int blk_queue_start_tag(struct request_q
rq-cmd_flags |= REQ_QUEUED;
rq-tag = tag;
bqt-tag_index[tag] = rq;
-   blkdev_dequeue_request(rq);
+   elv_dequeue_request(q, rq);
list_add(rq-queuelist, q-tag_busy_list);
bqt-busy++;
return 0;
Index: 2.6.24-rc5/drivers/scsi/scsi_lib.c
===
--- 2.6.24-rc5.orig/drivers/scsi/scsi_lib.c
+++ 2.6.24-rc5/drivers/scsi/scsi_lib.c
@@ -1530,8 +1530,7 @@ static void scsi_request_fn(struct reque
/*
 * Remove the request from the request list.
 */
-   if (!(blk_queue_tagged(q)  !blk_queue_start_tag(q, req)))
-   blkdev_dequeue_request(req);
+   blkdev_dequeue_request(req);
sdev-device_busy++;
 
spin_unlock(q-queue_lock);
Index: 2.6.24-rc5/include/linux/blkdev.h
===
--- 2.6.24-rc5.orig/include/linux/blkdev.h
+++ 2.6.24-rc5/include/linux/blkdev.h
@@ -747,11 +747,6 @@ extern void blk_complete_request(struct 
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
-static inline void blkdev_dequeue_request(struct request *req)
-{
-   elv_dequeue_request(req-q, req);
-}
-
 /*
  * Access functions for manipulating queue properties
  */
@@ -814,6 +809,12 @@ static inline struct request *blk_map_qu
return bqt-tag_index[tag];
 }
 
+static inline void blkdev_dequeue_request(struct request *req)
+{
+   if (!(blk_queue_tagged(req-q)  !blk_queue_start_tag(req-q, req)))
+   elv_dequeue_request(req-q, req);
+}
+
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http

[PATCH 01/30] blk_end_request: add new request completion interface (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch adds 2 new interfaces for request completion:
  o blk_end_request()   : called without queue lock
  o __blk_end_request() : called with queue lock held

blk_end_request takes 'error' as an argument instead of 'uptodate',
which current end_that_request_* take.
The meanings of values are below and the value is used when bio is
completed.
0 : success
< 0 : error

Some device drivers call some generic functions below between
end_that_request_{first/chunk} and end_that_request_last().
  o add_disk_randomness()
  o blk_queue_end_tag()
  o blkdev_dequeue_request()
These are called in the blk_end_request interfaces as a part of
generic request completion.
So all device drivers become to call above functions.
To decide whether to call blkdev_dequeue_request(), blk_end_request
uses list_empty(rq-queuelist) (blk_queued_rq() macro is added for it).
So drivers must re-initialize it using list_init() or so before calling
blk_end_request if drivers use it for its specific purpose.
(Currently, there is no driver which completes request without
 re-initializing the queuelist after used it.  So rq-queuelist
 can be used for the purpose above.)

Normal drivers can be converted to use blk_end_request()
in a standard way shown below.

 a) end_that_request_{chunk/first}
spin_lock_irqsave()
(add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
end_that_request_last()
spin_unlock_irqrestore()
=> blk_end_request()

 b) spin_lock_irqsave()
end_that_request_{chunk/first}
(add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
end_that_request_last()
spin_unlock_irqrestore()
=> spin_lock_irqsave()
   __blk_end_request()
   spin_unlock_irqrestore()

 c) spin_lock_irqsave()
(add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
end_that_request_last()
spin_unlock_irqrestore()
=> blk_end_request()   or   spin_lock_irqsave()
__blk_end_request()
spin_unlock_irqrestore()

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   96 +
 include/linux/blkdev.h |4 ++
 2 files changed, 100 insertions(+)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3769,6 +3769,102 @@ void end_request(struct request *req, in
 }
 EXPORT_SYMBOL(end_request);
 
+static void complete_request(struct request *rq, int error)
+{
+   /*
+* REMOVEME: This conversion is transitional and will be removed
+*   when old end_that_request_* are unexported.
+*/
+   int uptodate = 1;
+   if (error)
+   uptodate = (error == -EIO) ? 0 : error;
+
+   if (blk_rq_tagged(rq))
+   blk_queue_end_tag(rq-q, rq);
+
+   if (blk_queued_rq(rq))
+   blkdev_dequeue_request(rq);
+
+   end_that_request_last(rq, uptodate);
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:   the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ * 0 - we are done with this request
+ * 1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+   struct request_queue *q = rq-q;
+   unsigned long flags = 0UL;
+   /*
+* REMOVEME: This conversion is transitional and will be removed
+*   when old end_that_request_* are unexported.
+*/
+   int uptodate = 1;
+   if (error)
+   uptodate = (error == -EIO) ? 0 : error;
+
+   if (blk_fs_request(rq) || blk_pc_request(rq)) {
+   if (__end_that_request_first(rq, uptodate, nr_bytes))
+   return 1;
+   }
+
+   add_disk_randomness(rq-rq_disk);
+
+   spin_lock_irqsave(q-queue_lock, flags);
+   complete_request(rq, error);
+   spin_unlock_irqrestore(q-queue_lock, flags);
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:   the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ * 0 - we are done with this request
+ * 1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+   /*
+* REMOVEME: This conversion is transitional and will be removed

[PATCH 02/30] blk_end_request: add/export functions to get request size (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch adds/exports functions to get the size of request in bytes.
They are useful because blk_end_request interfaces take bytes
as a completed I/O size instead of sectors.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   25 ++---
 include/linux/blkdev.h |8 
 2 files changed, 30 insertions(+), 3 deletions(-)

Index: 2.6.24-rc4/include/linux/blkdev.h
===
--- 2.6.24-rc4.orig/include/linux/blkdev.h
+++ 2.6.24-rc4/include/linux/blkdev.h
@@ -739,6 +739,14 @@ extern void end_dequeued_request(struct 
 extern void blk_complete_request(struct request *);
 
 /*
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
+ */
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
+
+/*
  * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3701,13 +3701,32 @@ static inline void __end_request(struct 
}
 }
 
-static unsigned int rq_byte_size(struct request *rq)
+/**
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
 {
if (blk_fs_request(rq))
return rq-hard_nr_sectors  9;
 
return rq-data_len;
 }
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+   if (blk_fs_request(rq))
+   return rq-current_nr_sectors  9;
+
+   if (rq-bio)
+   return rq-bio-bi_size;
+
+   return rq-data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
  * end_queued_request - end all I/O on a queued request
@@ -3722,7 +3741,7 @@ static unsigned int rq_byte_size(struct 
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, rq_byte_size(rq), 1);
+   __end_request(rq, uptodate, blk_rq_bytes(rq), 1);
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3739,7 +3758,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, rq_byte_size(rq), 0);
+   __end_request(rq, uptodate, blk_rq_bytes(rq), 0);
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 00/30] blk_end_request: full I/O completion handler (take 4)

2007-12-11 Thread Kiyoshi Ueda
->end_io() is necessary
to allow device stacking at request level, that is request-based
device-mapper multipath.
Currently, device-mapper is implemented as a stacking block device
at BIO level.  OTOH, request-based dm will stack at request level
to allow better multipathing decision.
To allow device stacking at request level, the completion procedure
need to provide a hook for it.
For example, dm-multipath has to check errors and retry with other
paths if necessary before returning the I/O result to upper layer.
struct request has 'end_io' hook currently.  But it's called at
the very late stage of completion handling where the I/O result
is already returned to the upper layer.
So we need something here.

The first approach to hook in completion of each chunk of request
was adding a new rq-end_io_first() hook and calling it on the top
of __end_that_request_first().
  - http://marc.theaimsgroup.com/?l=linux-scsim=115520444515914w=2
  - http://marc.theaimsgroup.com/?l=linux-kernelm=116656637425880w=2
However, Jens pointed out that redesigning rq-end_io() as a full
completion handler would be better:

On Thu, 21 Dec 2006 08:49:47 +0100, Jens Axboe [EMAIL PROTECTED] wrote:
 Ok, I see what you are getting at. The current ->end_io() is called when
 the request has fully completed, you want notification for each chunk
 potentially completed.
 
 I think a better design here would be to use ->end_io() as the full
 completion handler, similar to how bio->bi_end_io() works. A request
 originating from __make_request() would set something ala:
.
 instead of calling the functions manually. That would allow you to get
 notification right at the beginning and do what you need, without adding
 a special hook for this.

I thought his comment was reasonable.
So I modified the patches based on his suggestion.


WHAT IS CHANGED
===
The change is basically illustrated by the following pseudo code:

[Before]
  if (end_that_request_{first/chunk} succeeds) { -- completes bios
 do something driver specific
 end_that_request_last() -- calls end_io()
 the request is free from the driver
  } else {
 the request was incomplete, retry for leftover or ignoring
  }

[After]
  if (blk_end_request() succeeds) { -- calls end_io(), completes bios
 the request is free from the driver
  } else {
 the request was incomplete, retry for leftover or ignoring
  }


In detail, request completion procedures are changed like below.

[Before]
  o 2 steps completion using end_that_request_{first/chunk}
and end_that_request_last().
  o Device drivers have ownership of a request until they
call end_that_request_last().
  o rq->end_io() is called at the last stage of
end_that_request_last() for some block layer codes need
specific request handling when completing it.

[After]
  o 1 step completion using blk_end_request().
(end_that_request_* are no longer used from device drivers.)
  o Device drivers give over ownership of a request
when calling blk_end_request().
If it returns 0, the request is completed.
If it returns 1, the request isn't completed and
the ownership is returned to the device driver again.
  o rq->end_io() is called at the top of blk_end_request() to
allow to hook all parts of request completion.
Existing users of rq-end_io() must be changed to do
all parts of request completion.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 04/30] blk_end_request: changing arm (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts arm's OMAP mailbox driver to use
blk_end_request interfaces.

If the original code was converted literally, blk_end_request would
be called with '-EIO' because end_that_request_last() were called
with '0' (i.e. failure).
But I think these '0's are bugs in the original code because it's
unlikely that all requests are treated as failure.
(The bugs should have no effect unless these requests have an end_io
 callback.)

So I changed them to pass '0' (i.e. success) to blk_end_request.

Cc: Toshihiro Kobayashi [EMAIL PROTECTED]
Cc: Hiroshi DOYU [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---

 arch/arm/plat-omap/mailbox.c |   16 ++--
 1 files changed, 6 insertions(+), 10 deletions(-)

Index: 2.6.24-rc4/arch/arm/plat-omap/mailbox.c
===
--- 2.6.24-rc4.orig/arch/arm/plat-omap/mailbox.c
+++ 2.6.24-rc4/arch/arm/plat-omap/mailbox.c
@@ -116,8 +116,8 @@ static void mbox_tx_work(struct work_str
}
 
spin_lock(q-queue_lock);
-   blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock(q-queue_lock);
}
 }
@@ -149,10 +149,8 @@ static void mbox_rx_work(struct work_str
 
msg = (mbox_msg_t) rq-data;
 
-   spin_lock_irqsave(q-queue_lock, flags);
-   blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
-   spin_unlock_irqrestore(q-queue_lock, flags);
+   if (blk_end_request(rq, 0, 0))
+   BUG();
 
mbox-rxq-callback((void *)msg);
}
@@ -263,10 +261,8 @@ omap_mbox_read(struct device *dev, struc
 
*p = (mbox_msg_t) rq-data;
 
-   spin_lock_irqsave(q-queue_lock, flags);
-   blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
-   spin_unlock_irqrestore(q-queue_lock, flags);
+   if (blk_end_request(rq, 0, 0))
+   BUG();
 
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info(mbox: Illegal seq bit!(%08x) ignored\n, *p);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 03/30] blk_end_request: changing block layer core (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts core parts of block layer to use blk_end_request
interfaces.  Related 'uptodate' arguments are converted to 'error'.

'dequeue' argument was originally introduced for end_dequeued_request(),
where no attempt should be made to dequeue the request as it's already
dequeued.
However, it's not necessary as it can be checked with
list_empty(rq->queuelist).
(Dequeued request has empty list and queued request doesn't.)
And it has been done in blk_end_request interfaces.

As a result of this patch, end_queued_request() and
end_dequeued_request() become identical.  A future patch will merge
and rename them and change users of those functions.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c |   35 +++
 1 files changed, 15 insertions(+), 20 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct requ
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
struct request *rq;
-   int uptodate;
 
if (error  !q-orderr)
q-orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct req
/*
 * Okay, sequence complete.
 */
-   uptodate = 1;
-   if (q-orderr)
-   uptodate = q-orderr;
-
q-ordseq = 0;
rq = q-orig_bar_rq;
 
-   end_that_request_first(rq, uptodate, rq-hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, q-orderr, blk_rq_bytes(rq)))
+   BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue 
 * ORDERED_NONE while this request is on it.
 */
blkdev_dequeue_request(rq);
-   end_that_request_first(rq, -EOPNOTSUPP,
-  rq-hard_nr_sectors);
-   end_that_request_last(rq, -EOPNOTSUPP);
+   if (__blk_end_request(rq, -EOPNOTSUPP,
+ blk_rq_bytes(rq)))
+   BUG();
*rqp = NULL;
return 0;
}
@@ -3691,14 +3686,14 @@ void end_that_request_last(struct reques
 EXPORT_SYMBOL(end_that_request_last);
 
 static inline void __end_request(struct request *rq, int uptodate,
-unsigned int nr_bytes, int dequeue)
+unsigned int nr_bytes)
 {
-   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-   if (dequeue)
-   blkdev_dequeue_request(rq);
-   add_disk_randomness(rq-rq_disk);
-   end_that_request_last(rq, uptodate);
-   }
+   int error = 0;
+
+   if (uptodate <= 0)
+   error = uptodate ? uptodate : -EIO;
+
+   __blk_end_request(rq, error, nr_bytes);
 }
 
 /**
@@ -3741,7 +3736,7 @@ EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, blk_rq_bytes(rq), 1);
+   __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3758,7 +3753,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, blk_rq_bytes(rq), 0);
+   __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3784,7 +3779,7 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-   __end_request(req, uptodate, req-hard_cur_sectors  9, 1);
+   __end_request(req, uptodate, req-hard_cur_sectors  9);
 }
 EXPORT_SYMBOL(end_request);
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 07/30] blk_end_request: changing floppy (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts floppy to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, floppy_end_request(),
is changed.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/floppy.c |   16 +++-
 1 files changed, 7 insertions(+), 9 deletions(-)

Index: 2.6.24-rc4/drivers/block/floppy.c
===
--- 2.6.24-rc4.orig/drivers/block/floppy.c
+++ 2.6.24-rc4/drivers/block/floppy.c
@@ -2287,21 +2287,19 @@ static int do_format(int drive, struct f
  * =
  */
 
-static void floppy_end_request(struct request *req, int uptodate)
+static void floppy_end_request(struct request *req, int error)
 {
unsigned int nr_sectors = current_count_sectors;
+   unsigned int drive = (unsigned int)req-rq_disk-private_data;
 
/* current_count_sectors can be zero if transfer failed */
-   if (!uptodate)
+   if (error)
nr_sectors = req-current_nr_sectors;
-   if (end_that_request_first(req, uptodate, nr_sectors))
+   if (__blk_end_request(req, error, nr_sectors  9))
return;
-   add_disk_randomness(req-rq_disk);
-   floppy_off((long)req-rq_disk-private_data);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 
/* We're done with the request */
+   floppy_off(drive);
current_req = NULL;
 }
 
@@ -2332,7 +2330,7 @@ static void request_done(int uptodate)
 
/* unlock chained buffers */
spin_lock_irqsave(q-queue_lock, flags);
-   floppy_end_request(req, 1);
+   floppy_end_request(req, 0);
spin_unlock_irqrestore(q-queue_lock, flags);
} else {
if (rq_data_dir(req) == WRITE) {
@@ -2346,7 +2344,7 @@ static void request_done(int uptodate)
DRWE-last_error_generation = DRS-generation;
}
spin_lock_irqsave(q-queue_lock, flags);
-   floppy_end_request(req, 0);
+   floppy_end_request(req, -EIO);
spin_unlock_irqrestore(q-queue_lock, flags);
}
 }
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 05/30] blk_end_request: changing um (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts um to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, ubd_end_request(),
is changed.

Cc: Jeff Dike [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 arch/um/drivers/ubd_kern.c |   16 
 1 files changed, 4 insertions(+), 12 deletions(-)

Index: 2.6.24-rc4/arch/um/drivers/ubd_kern.c
===
--- 2.6.24-rc4.orig/arch/um/drivers/ubd_kern.c
+++ 2.6.24-rc4/arch/um/drivers/ubd_kern.c
@@ -475,17 +475,9 @@ static void do_ubd_request(struct reques
 /* Only changed by ubd_init, which is an initcall. */
 int thread_fd = -1;
 
-static void ubd_end_request(struct request *req, int bytes, int uptodate)
+static void ubd_end_request(struct request *req, int bytes, int error)
 {
-   if (!end_that_request_first(req, uptodate, bytes  9)) {
-   struct ubd *dev = req-rq_disk-private_data;
-   unsigned long flags;
-
-   add_disk_randomness(req-rq_disk);
-   spin_lock_irqsave(dev-lock, flags);
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(dev-lock, flags);
-   }
+   blk_end_request(req, error, bytes);
 }
 
 /* Callable only from interrupt context - otherwise you need to do
@@ -493,10 +485,10 @@ static void ubd_end_request(struct reque
 static inline void ubd_finish(struct request *req, int bytes)
 {
if(bytes  0){
-   ubd_end_request(req, 0, 0);
+   ubd_end_request(req, 0, -EIO);
return;
}
-   ubd_end_request(req, bytes, 1);
+   ubd_end_request(req, bytes, 0);
 }
 
 static LIST_HEAD(restart);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 06/30] blk_end_request: changing DAC960 (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts DAC960 to use blk_end_request interfaces.
Related 'UpToDate' arguments are converted to 'Error'.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/DAC960.c |   11 ++-
 1 files changed, 2 insertions(+), 9 deletions(-)

Index: 2.6.24-rc4/drivers/block/DAC960.c
===
--- 2.6.24-rc4.orig/drivers/block/DAC960.c
+++ 2.6.24-rc4/drivers/block/DAC960.c
@@ -3455,19 +3455,12 @@ static inline bool DAC960_ProcessComplet
 bool SuccessfulIO)
 {
struct request *Request = Command-Request;
-   int UpToDate;
-
-   UpToDate = 0;
-   if (SuccessfulIO)
-   UpToDate = 1;
+   int Error = SuccessfulIO ? 0 : -EIO;
 
pci_unmap_sg(Command-Controller-PCIDevice, Command-cmd_sglist,
Command-SegmentCount, Command-DmaDirection);
 
-if (!end_that_request_first(Request, UpToDate, Command-BlockCount)) {
-   add_disk_randomness(Request-rq_disk);
-   end_that_request_last(Request, UpToDate);
-
+if (!__blk_end_request(Request, Error, Command-BlockCount  9)) {
if (Command-Completion) {
complete(Command-Completion);
Command-Completion = NULL;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 08/30] blk_end_request: changing nbd (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts nbd to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Paul Clements [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/nbd.c |8 +++-
 1 files changed, 3 insertions(+), 5 deletions(-)

Index: 2.6.24-rc4/drivers/block/nbd.c
===
--- 2.6.24-rc4.orig/drivers/block/nbd.c
+++ 2.6.24-rc4/drivers/block/nbd.c
@@ -100,17 +100,15 @@ static const char *nbdcmd_to_ascii(int c
 
 static void nbd_end_request(struct request *req)
 {
-   int uptodate = (req-errors == 0) ? 1 : 0;
+   int error = req-errors ? -EIO : 0;
struct request_queue *q = req-q;
unsigned long flags;
 
dprintk(DBG_BLKDEV, %s: request %p: %s\n, req-rq_disk-disk_name,
-   req, uptodate? done: failed);
+   req, error ? failed : done);
 
spin_lock_irqsave(q-queue_lock, flags);
-   if (!end_that_request_first(req, uptodate, req-nr_sectors)) {
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, error, req-nr_sectors  9);
spin_unlock_irqrestore(q-queue_lock, flags);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 10/30] blk_end_request: changing sunvdc (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts sunvdc to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, vdc_end_request(),
is changed.

Cc: David S. Miller [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/sunvdc.c |   11 ---
 1 files changed, 4 insertions(+), 7 deletions(-)

Index: 2.6.24-rc4/drivers/block/sunvdc.c
===
--- 2.6.24-rc4.orig/drivers/block/sunvdc.c
+++ 2.6.24-rc4/drivers/block/sunvdc.c
@@ -212,12 +212,9 @@ static void vdc_end_special(struct vdc_p
vdc_finish(port-vio, -err, WAITING_FOR_GEN_CMD);
 }
 
-static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
+static void vdc_end_request(struct request *req, int error, int num_sectors)
 {
-   if (end_that_request_first(req, uptodate, num_sectors))
-   return;
-   add_disk_randomness(req-rq_disk);
-   end_that_request_last(req, uptodate);
+   __blk_end_request(req, error, num_sectors  9);
 }
 
 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
@@ -242,7 +239,7 @@ static void vdc_end_one(struct vdc_port 
 
rqe-req = NULL;
 
-   vdc_end_request(req, !desc-status, desc-size  9);
+   vdc_end_request(req, (desc-status ? -EIO : 0), desc-size  9);
 
if (blk_queue_stopped(port-disk-queue))
blk_start_queue(port-disk-queue);
@@ -456,7 +453,7 @@ static void do_vdc_request(struct reques
 
blkdev_dequeue_request(req);
if (__send_request(req)  0)
-   vdc_end_request(req, 0, req-hard_nr_sectors);
+   vdc_end_request(req, -EIO, req-hard_nr_sectors);
}
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 09/30] blk_end_request: changing ps3disk (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts ps3disk to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Geoff Levand [EMAIL PROTECTED]
Cc: Geert Uytterhoeven [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ps3disk.c |   12 
 1 files changed, 4 insertions(+), 8 deletions(-)

Index: 2.6.24-rc4/drivers/block/ps3disk.c
===
--- 2.6.24-rc4.orig/drivers/block/ps3disk.c
+++ 2.6.24-rc4/drivers/block/ps3disk.c
@@ -229,7 +229,7 @@ static irqreturn_t ps3disk_interrupt(int
struct ps3_storage_device *dev = data;
struct ps3disk_private *priv;
struct request *req;
-   int res, read, uptodate;
+   int res, read, error;
u64 tag, status;
unsigned long num_sectors;
const char *op;
@@ -270,21 +270,17 @@ static irqreturn_t ps3disk_interrupt(int
if (status) {
dev_dbg(dev-sbd.core, %s:%u: %s failed 0x%lx\n, __func__,
__LINE__, op, status);
-   uptodate = 0;
+   error = -EIO;
} else {
dev_dbg(dev-sbd.core, %s:%u: %s completed\n, __func__,
__LINE__, op);
-   uptodate = 1;
+   error = 0;
if (read)
ps3disk_scatter_gather(dev, req, 0);
}
 
spin_lock(priv-lock);
-   if (!end_that_request_first(req, uptodate, num_sectors)) {
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, error, num_sectors  9);
priv-req = NULL;
ps3disk_do_request(dev, priv-queue);
spin_unlock(priv-lock);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 12/30] blk_end_request: changing ub (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts ub to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Pete Zaitcev [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ub.c |   10 +-
 1 files changed, 5 insertions(+), 5 deletions(-)

Index: 2.6.24-rc4/drivers/block/ub.c
===
--- 2.6.24-rc4.orig/drivers/block/ub.c
+++ 2.6.24-rc4/drivers/block/ub.c
@@ -808,16 +808,16 @@ static void ub_rw_cmd_done(struct ub_dev
 
 static void ub_end_rq(struct request *rq, unsigned int scsi_status)
 {
-   int uptodate;
+   int error;
 
if (scsi_status == 0) {
-   uptodate = 1;
+   error = 0;
} else {
-   uptodate = 0;
+   error = -EIO;
rq-errors = scsi_status;
}
-   end_that_request_first(rq, uptodate, rq-hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
+   BUG();
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 11/30] blk_end_request: changing sx8 (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts sx8 to use blk_end_request interfaces.
Related 'uptodate' and 'is_ok' arguments are converted to 'error'.

As a result, the interfaces of internal functions below are changed.
  o carm_end_request_queued
  o carm_end_rq
  o carm_handle_array_info
  o carm_handle_scan_chan
  o carm_handle_generic
  o carm_handle_rw

The 'is_ok' is set at only one place in carm_handle_resp() below:

int is_ok = (status == RMSG_OK);

And the value is propagated to all functions above, and no modification
in other places.
So the actual conversion of the 'is_ok' is done at only one place above.

Cc: Jeff Garzik [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/sx8.c |   58 +---
 1 files changed, 28 insertions(+), 30 deletions(-)

Index: 2.6.24-rc4/drivers/block/sx8.c
===
--- 2.6.24-rc4.orig/drivers/block/sx8.c
+++ 2.6.24-rc4/drivers/block/sx8.c
@@ -744,16 +744,14 @@ static unsigned int carm_fill_get_fw_ver
 
 static inline void carm_end_request_queued(struct carm_host *host,
   struct carm_request *crq,
-  int uptodate)
+  int error)
 {
struct request *req = crq-rq;
int rc;
 
-   rc = end_that_request_first(req, uptodate, req-hard_nr_sectors);
+   rc = __blk_end_request(req, error, blk_rq_bytes(req));
assert(rc == 0);
 
-   end_that_request_last(req, uptodate);
-
rc = carm_put_request(host, crq);
assert(rc == 0);
 }
@@ -793,9 +791,9 @@ static inline void carm_round_robin(stru
 }
 
 static inline void carm_end_rq(struct carm_host *host, struct carm_request 
*crq,
-   int is_ok)
+  int error)
 {
-   carm_end_request_queued(host, crq, is_ok);
+   carm_end_request_queued(host, crq, error);
if (max_queue == 1)
carm_round_robin(host);
else if ((host-n_msgs = CARM_MSG_LOW_WATER) 
@@ -873,14 +871,14 @@ queue_one_request:
sg = crq-sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem = 0) {
-   carm_end_rq(host, crq, 0);
+   carm_end_rq(host, crq, -EIO);
return; /* request with no s/g entries? */
}
 
/* map scatterlist to PCI bus addresses */
n_elem = pci_map_sg(host-pdev, sg, n_elem, pci_dir);
if (n_elem = 0) {
-   carm_end_rq(host, crq, 0);
+   carm_end_rq(host, crq, -EIO);
return; /* request with no s/g entries? */
}
crq-n_elem = n_elem;
@@ -941,7 +939,7 @@ queue_one_request:
 
 static void carm_handle_array_info(struct carm_host *host,
   struct carm_request *crq, u8 *mem,
-  int is_ok)
+  int error)
 {
struct carm_port *port;
u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -952,9 +950,9 @@ static void carm_handle_array_info(struc
 
DPRINTK(ENTER\n);
 
-   carm_end_rq(host, crq, is_ok);
+   carm_end_rq(host, crq, error);
 
-   if (!is_ok)
+   if (error)
goto out;
if (le32_to_cpu(desc-array_status)  ARRAY_NO_EXIST)
goto out;
@@ -1001,7 +999,7 @@ out:
 
 static void carm_handle_scan_chan(struct carm_host *host,
  struct carm_request *crq, u8 *mem,
- int is_ok)
+ int error)
 {
u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
unsigned int i, dev_count = 0;
@@ -1009,9 +1007,9 @@ static void carm_handle_scan_chan(struct
 
DPRINTK(ENTER\n);
 
-   carm_end_rq(host, crq, is_ok);
+   carm_end_rq(host, crq, error);
 
-   if (!is_ok) {
+   if (error) {
new_state = HST_ERROR;
goto out;
}
@@ -1033,23 +1031,23 @@ out:
 }
 
 static void carm_handle_generic(struct carm_host *host,
-   struct carm_request *crq, int is_ok,
+   struct carm_request *crq, int error,
int cur_state, int next_state)
 {
DPRINTK(ENTER\n);
 
-   carm_end_rq(host, crq, is_ok);
+   carm_end_rq(host, crq, error);
 
assert(host-state == cur_state);
-   if (is_ok)
-   host-state = next_state;
-   else
+   if (error)
host-state = HST_ERROR;
+   else
+   host-state = next_state;
schedule_work(host-fsm_task);
 }
 
 static inline void carm_handle_rw(struct carm_host *host,
- struct carm_request *crq, int is_ok)
+ struct carm_request *crq, int error)
 {
int

[PATCH 13/30] blk_end_request: changing viodasd (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts viodasd to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, viodasd_end_request(),
is changed.

Cc: Stephen Rothwell [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/viodasd.c |   15 ++-
 1 files changed, 6 insertions(+), 9 deletions(-)

Index: 2.6.24-rc4/drivers/block/viodasd.c
===
--- 2.6.24-rc4.orig/drivers/block/viodasd.c
+++ 2.6.24-rc4/drivers/block/viodasd.c
@@ -229,13 +229,10 @@ static struct block_device_operations vi
 /*
  * End a request
  */
-static void viodasd_end_request(struct request *req, int uptodate,
+static void viodasd_end_request(struct request *req, int error,
int num_sectors)
 {
-   if (end_that_request_first(req, uptodate, num_sectors))
-   return;
-   add_disk_randomness(req-rq_disk);
-   end_that_request_last(req, uptodate);
+   __blk_end_request(req, error, num_sectors  9);
 }
 
 /*
@@ -374,12 +371,12 @@ static void do_viodasd_request(struct re
blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
-   viodasd_end_request(req, 0, req-hard_nr_sectors);
+   viodasd_end_request(req, -EIO, req-hard_nr_sectors);
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
-   viodasd_end_request(req, 0, req-hard_nr_sectors);
+   viodasd_end_request(req, -EIO, req-hard_nr_sectors);
}
 }
 
@@ -591,7 +588,7 @@ static int viodasd_handle_read_write(str
num_req_outstanding--;
spin_unlock_irqrestore(viodasd_spinlock, irq_flags);
 
-   error = event-xRc != HvLpEvent_Rc_Good;
+   error = (event-xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
if (error) {
const struct vio_error_entry *err;
err = vio_lookup_rc(viodasd_err_table, bevent-sub_result);
@@ -601,7 +598,7 @@ static int viodasd_handle_read_write(str
}
qlock = req-q-queue_lock;
spin_lock_irqsave(qlock, irq_flags);
-   viodasd_end_request(req, !error, num_sect);
+   viodasd_end_request(req, error, num_sect);
spin_unlock_irqrestore(qlock, irq_flags);
 
/* Finally, try to get more requests off of this device's queue */
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 16/30] blk_end_request: changing i2o_block (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts i2o_block to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, i2o_block_end_request(),
is changed.

Cc: Markus Lidel [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/message/i2o/i2o_block.c |   20 
 1 files changed, 8 insertions(+), 12 deletions(-)

Index: 2.6.24-rc4/drivers/message/i2o/i2o_block.c
===
--- 2.6.24-rc4.orig/drivers/message/i2o/i2o_block.c
+++ 2.6.24-rc4/drivers/message/i2o/i2o_block.c
@@ -412,13 +412,13 @@ static void i2o_block_delayed_request_fn
 /**
  * i2o_block_end_request - Post-processing of completed commands
  * @req: request which should be completed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @error: 0 for success, < 0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Mark the request as complete. The lock must not be held when entering.
  *
  */
-static void i2o_block_end_request(struct request *req, int uptodate,
+static void i2o_block_end_request(struct request *req, int error,
  int nr_bytes)
 {
struct i2o_block_request *ireq = req-special;
@@ -426,22 +426,18 @@ static void i2o_block_end_request(struct
struct request_queue *q = req-q;
unsigned long flags;
 
-   if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+   if (blk_end_request(req, error, nr_bytes)) {
int leftover = (req-hard_nr_sectors  KERNEL_SECTOR_SHIFT);
 
if (blk_pc_request(req))
leftover = req-data_len;
 
-   if (end_io_error(uptodate))
-   end_that_request_chunk(req, 0, leftover);
+   if (error)
+   blk_end_request(req, -EIO, leftover);
}
 
-   add_disk_randomness(req-rq_disk);
-
spin_lock_irqsave(q-queue_lock, flags);
 
-   end_that_request_last(req, uptodate);
-
if (likely(dev)) {
dev-open_queue_depth--;
list_del(ireq-queue);
@@ -468,7 +464,7 @@ static int i2o_block_reply(struct i2o_co
   struct i2o_message *msg)
 {
struct request *req;
-   int uptodate = 1;
+   int error = 0;
 
req = i2o_cntxt_list_get(c, le32_to_cpu(msg-u.s.tcntxt));
if (unlikely(!req)) {
@@ -501,10 +497,10 @@ static int i2o_block_reply(struct i2o_co
 
req-errors++;
 
-   uptodate = 0;
+   error = -EIO;
}
 
-   i2o_block_end_request(req, uptodate, le32_to_cpu(msg-body[1]));
+   i2o_block_end_request(req, error, le32_to_cpu(msg-body[1]));
 
return 1;
 };
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 14/30] blk_end_request: changing xen-blkfront (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts xen-blkfront to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Jeremy Fitzhardinge [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/xen-blkfront.c |   10 --
 1 files changed, 4 insertions(+), 6 deletions(-)

Index: 2.6.24-rc4/drivers/block/xen-blkfront.c
===
--- 2.6.24-rc4.orig/drivers/block/xen-blkfront.c
+++ 2.6.24-rc4/drivers/block/xen-blkfront.c
@@ -452,7 +452,7 @@ static irqreturn_t blkif_interrupt(int i
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
-   int uptodate;
+   int error;
 
spin_lock_irqsave(blkif_io_lock, flags);
 
@@ -477,13 +477,13 @@ static irqreturn_t blkif_interrupt(int i
 
add_id_to_freelist(info, id);
 
-   uptodate = (bret-status == BLKIF_RSP_OKAY);
+   error = (bret-status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret-operation) {
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret-status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING blkfront: %s: write 
barrier op failed\n,
   info-gd-disk_name);
-   uptodate = -EOPNOTSUPP;
+   error = -EOPNOTSUPP;
info-feature_barrier = 0;
xlvbd_barrier(info);
}
@@ -494,10 +494,8 @@ static irqreturn_t blkif_interrupt(int i
dev_dbg(info-xbdev-dev, Bad return from 
blkdev data 
request: %x\n, bret-status);
 
-   ret = end_that_request_first(req, uptodate,
-   req-hard_nr_sectors);
+   ret = __blk_end_request(req, error, blk_rq_bytes(req));
BUG_ON(ret);
-   end_that_request_last(req, uptodate);
break;
default:
BUG();
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 15/30] blk_end_request: changing viocd (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts viocd to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, viocd_end_request(),
is changed.

Cc: Stephen Rothwell [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/cdrom/viocd.c |   15 ++-
 1 files changed, 6 insertions(+), 9 deletions(-)

Index: 2.6.24-rc4/drivers/cdrom/viocd.c
===
--- 2.6.24-rc4.orig/drivers/cdrom/viocd.c
+++ 2.6.24-rc4/drivers/cdrom/viocd.c
@@ -289,7 +289,7 @@ static int send_request(struct request *
return 0;
 }
 
-static void viocd_end_request(struct request *req, int uptodate)
+static void viocd_end_request(struct request *req, int error)
 {
int nsectors = req-hard_nr_sectors;
 
@@ -302,11 +302,8 @@ static void viocd_end_request(struct req
if (!nsectors)
nsectors = 1;
 
-   if (end_that_request_first(req, uptodate, nsectors))
+   if (__blk_end_request(req, error, nsectors  9))
BUG();
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 }
 
 static int rwreq;
@@ -317,11 +314,11 @@ static void do_viocd_request(struct requ
 
while ((rwreq == 0)  ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
-   viocd_end_request(req, 0);
+   viocd_end_request(req, -EIO);
else if (send_request(req)  0) {
printk(VIOCD_KERN_WARNING
unable to send message to OS/400!);
-   viocd_end_request(req, 0);
+   viocd_end_request(req, -EIO);
} else
rwreq++;
}
@@ -532,9 +529,9 @@ return_complete:
with rc %d:0x%04X: %s\n,
req, event-xRc,
bevent-sub_result, err-msg);
-   viocd_end_request(req, 0);
+   viocd_end_request(req, -EIO);
} else
-   viocd_end_request(req, 1);
+   viocd_end_request(req, 0);
 
/* restart handling of incoming requests */
spin_unlock_irqrestore(viocd_reqlock, flags);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 18/30] blk_end_request: changing s390 (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts s390 to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interfaces of internal functions below are changed:
  o dasd_end_request
  o tapeblock_end_request

Cc: Martin Schwidefsky [EMAIL PROTECTED]
Cc: Heiko Carstens [EMAIL PROTECTED]
Cc: [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/s390/block/dasd.c  |   19 ++-
 drivers/s390/char/tape_block.c |   13 ++---
 2 files changed, 16 insertions(+), 16 deletions(-)

Index: 2.6.24-rc4/drivers/s390/block/dasd.c
===
--- 2.6.24-rc4.orig/drivers/s390/block/dasd.c
+++ 2.6.24-rc4/drivers/s390/block/dasd.c
@@ -1078,12 +1078,10 @@ dasd_int_handler(struct ccw_device *cdev
  * posts the buffer_cache about a finalized request
  */
 static inline void
-dasd_end_request(struct request *req, int uptodate)
+dasd_end_request(struct request *req, int error)
 {
-   if (end_that_request_first(req, uptodate, req-hard_nr_sectors))
+   if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
-   add_disk_randomness(req-rq_disk);
-   end_that_request_last(req, uptodate);
 }
 
 /*
@@ -1170,13 +1168,16 @@ dasd_end_request_cb(struct dasd_ccw_req 
struct request *req;
struct dasd_device *device;
int status;
+   int error = 0;
 
req = (struct request *) data;
device = cqr-device;
dasd_profile_end(device, cqr, req);
status = cqr-device-discipline-free_cp(cqr,req);
+   if (status <= 0)
+   error = status ? status : -EIO;
spin_lock_irq(device-request_queue_lock);
-   dasd_end_request(req, status);
+   dasd_end_request(req, error);
spin_unlock_irq(device-request_queue_lock);
 }
 
@@ -1223,12 +1224,12 @@ __dasd_process_blk_queue(struct dasd_dev
  Rejecting write request %p,
  req);
blkdev_dequeue_request(req);
-   dasd_end_request(req, 0);
+   dasd_end_request(req, -EIO);
continue;
}
if (device-stopped  DASD_STOPPED_DC_EIO) {
blkdev_dequeue_request(req);
-   dasd_end_request(req, 0);
+   dasd_end_request(req, -EIO);
continue;
}
cqr = device-discipline-build_cp(device, req);
@@ -1253,7 +1254,7 @@ __dasd_process_blk_queue(struct dasd_dev
  on request %p,
  PTR_ERR(cqr), req);
blkdev_dequeue_request(req);
-   dasd_end_request(req, 0);
+   dasd_end_request(req, -EIO);
continue;
}
cqr-callback = dasd_end_request_cb;
@@ -1821,7 +1822,7 @@ dasd_flush_request_queue(struct dasd_dev
spin_lock_irq(device-request_queue_lock);
while ((req = elv_next_request(device-request_queue))) {
blkdev_dequeue_request(req);
-   dasd_end_request(req, 0);
+   dasd_end_request(req, -EIO);
}
spin_unlock_irq(device-request_queue_lock);
 }
Index: 2.6.24-rc4/drivers/s390/char/tape_block.c
===
--- 2.6.24-rc4.orig/drivers/s390/char/tape_block.c
+++ 2.6.24-rc4/drivers/s390/char/tape_block.c
@@ -74,11 +74,10 @@ tapeblock_trigger_requeue(struct tape_de
  * Post finished request.
  */
 static void
-tapeblock_end_request(struct request *req, int uptodate)
+tapeblock_end_request(struct request *req, int error)
 {
-   if (end_that_request_first(req, uptodate, req-hard_nr_sectors))
+   if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
-   end_that_request_last(req, uptodate);
 }
 
 static void
@@ -91,7 +90,7 @@ __tapeblock_end_request(struct tape_requ
 
device = ccw_req-device;
req = (struct request *) data;
-   tapeblock_end_request(req, ccw_req-rc == 0);
+   tapeblock_end_request(req, (ccw_req-rc == 0) ? 0 : -EIO);
if (ccw_req-rc == 0)
/* Update position. */
device-blk_data.block_position =
@@ -119,7 +118,7 @@ tapeblock_start_request(struct tape_devi
ccw_req = device-discipline-bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, TBLOCK: bread failed\n);
-   tapeblock_end_request(req, 0);
+   tapeblock_end_request(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req-callback = __tapeblock_end_request;
@@ -132,7 +131,7 @@ tapeblock_start_request(struct tape_devi
 * Start/enqueueing failed. No retries in
 * this case

[PATCH 19/30] blk_end_request: changing ide-scsi (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts ide-scsi to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Bartlomiej Zolnierkiewicz [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/scsi/ide-scsi.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

Index: 2.6.24-rc4/drivers/scsi/ide-scsi.c
===
--- 2.6.24-rc4.orig/drivers/scsi/ide-scsi.c
+++ 2.6.24-rc4/drivers/scsi/ide-scsi.c
@@ -921,8 +921,8 @@ static int idescsi_eh_reset (struct scsi
}
 
/* kill current request */
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   if (__blk_end_request(req, -EIO, 0))
+   BUG();
if (blk_sense_request(req))
kfree(scsi-pc-buffer);
kfree(scsi-pc);
@@ -931,8 +931,8 @@ static int idescsi_eh_reset (struct scsi
 
/* now nuke the drive queue */
while ((req = elv_next_request(drive-queue))) {
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   if (__blk_end_request(req, -EIO, 0))
+   BUG();
}
 
HWGROUP(drive)-rq = NULL;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 21/30] blk_end_request: changing cciss (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts cciss to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

cciss is a little bit different from normal drivers.
cciss directly calls bio_endio() and disk_stat_add()
when completing request.  But those can be replaced with
__end_that_request_first().
After the replacement, request completion procedures of
those drivers become like the following:
o end_that_request_first()
o add_disk_randomness()
o end_that_request_last()
This can be converted to blk_end_request() by following
the rule (a) mentioned in the patch subject
[PATCH 01/30] blk_end_request: add new request completion interface.

Cc: Mike Miller [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/cciss.c |   25 +++--
 1 files changed, 3 insertions(+), 22 deletions(-)

Index: 2.6.24-rc4/drivers/block/cciss.c
===
--- 2.6.24-rc4.orig/drivers/block/cciss.c
+++ 2.6.24-rc4/drivers/block/cciss.c
@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *ino
}
 }
 
-static inline void complete_buffers(struct bio *bio, int status)
-{
-   while (bio) {
-   struct bio *xbh = bio-bi_next;
-
-   bio-bi_next = NULL;
-   bio_endio(bio, status ? 0 : -EIO);
-   bio = xbh;
-   }
-}
-
 static void cciss_check_queues(ctlr_info_t *h)
 {
int start_queue = h-next_to_run;
@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct re
pci_unmap_page(h-pdev, temp64.val, cmd-SG[i].Len, ddir);
}
 
-   complete_buffers(rq-bio, (rq-errors == 0));
-
-   if (blk_fs_request(rq)) {
-   const int rw = rq_data_dir(rq);
-
-   disk_stat_add(rq-rq_disk, sectors[rw], rq-nr_sectors);
-   }
-
 #ifdef CCISS_DEBUG
printk(Done with %p\n, rq);
 #endif /* CCISS_DEBUG */
 
-   add_disk_randomness(rq-rq_disk);
+   if (blk_end_request(rq, (rq-errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
+   BUG();
+
spin_lock_irqsave(h-lock, flags);
-   end_that_request_last(rq, (rq-errors == 0));
cmd_free(h, cmd, 1);
cciss_check_queues(h);
spin_unlock_irqrestore(h-lock, flags);
@@ -2544,7 +2526,6 @@ after_error_processing:
}
cmd-rq-data_len = 0;
cmd-rq-completion_data = cmd;
-   blk_add_trace_rq(cmd-rq-q, cmd-rq, BLK_TA_COMPLETE);
blk_complete_request(cmd-rq);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 20/30] blk_end_request: changing xsysace (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts xsysace to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

xsysace is a little bit different from normal drivers.
xsysace driver has a state machine in it.
It calls end_that_request_first() and end_that_request_last()
from different states. (ACE_FSM_STATE_REQ_TRANSFER and
ACE_FSM_STATE_REQ_COMPLETE, respectively.)

However, those states are consecutive and without any interruption
in between.
So we can just follow the standard conversion rule (b) mentioned in
the patch subject [PATCH 01/30] blk_end_request: add new request
completion interface.

Cc: Grant Likely [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/xsysace.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

Index: 2.6.24-rc4/drivers/block/xsysace.c
===
--- 2.6.24-rc4.orig/drivers/block/xsysace.c
+++ 2.6.24-rc4/drivers/block/xsysace.c
@@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_d
 
/* bio finished; is there another one? */
i = ace-req-current_nr_sectors;
-   if (end_that_request_first(ace-req, 1, i)) {
+   if (__blk_end_request(ace-req, 0, i)) {
/* dev_dbg(ace-dev, next block; h=%li c=%i\n,
 *  ace-req-hard_nr_sectors,
 *  ace-req-current_nr_sectors);
@@ -718,9 +718,6 @@ static void ace_fsm_dostate(struct ace_d
break;
 
case ACE_FSM_STATE_REQ_COMPLETE:
-   /* Complete the block request */
-   blkdev_dequeue_request(ace-req);
-   end_that_request_last(ace-req, 1);
ace-req = NULL;
 
/* Finished request; go to idle state */
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 23/30] blk_end_request: changing ide normal caller (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts normal parts of ide to use blk_end_request
interfaces.  Related 'uptodate' arguments are converted to 'error'.

The conversion of 'uptodate' to 'error' is done only for the internal
function, __ide_end_request().
ide_end_request() was not changed since it's exported and used
by many ide drivers.


With this patch, blkdev_dequeue_request() in __ide_end_request() is
moved to blk_end_request, since blk_end_request takes care of
dequeueing request like below:

if (!list_empty(rq-queuelist))
blkdev_dequeue_request(rq);

In the case of ide,
  o 'dequeue' variable of __ide_end_request() is 1 only when the request
is still linked to the queue (i.e. rq-queuelist is not empty)
  o 'dequeue' variable of __ide_end_request() is 0 only when the request
has already been removed from the queue (i.e. rq-queuelist is empty)
So blk_end_request can handle it correctly although ide always runs
through the code above.

Cc: Bartlomiej Zolnierkiewicz [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/ide/ide-cd.c |6 +++---
 drivers/ide/ide-io.c |   25 -
 2 files changed, 15 insertions(+), 16 deletions(-)

Index: 2.6.24-rc4/drivers/ide/ide-cd.c
===
--- 2.6.24-rc4.orig/drivers/ide/ide-cd.c
+++ 2.6.24-rc4/drivers/ide/ide-cd.c
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive
BUG();
} else {
spin_lock_irqsave(ide_lock, flags);
-   end_that_request_chunk(failed, 0,
-   failed-data_len);
-   end_that_request_last(failed, 0);
+   if (__blk_end_request(failed, -EIO,
+ failed-data_len))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
}
} else
Index: 2.6.24-rc4/drivers/ide/ide-io.c
===
--- 2.6.24-rc4.orig/drivers/ide/ide-io.c
+++ 2.6.24-rc4/drivers/ide/ide-io.c
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t
 int uptodate, unsigned int nr_bytes, int dequeue)
 {
int ret = 1;
+   int error = 0;
+
+   if (uptodate <= 0)
+   error = uptodate ? uptodate : -EIO;
 
/*
 * if failfast is set on a request, override number of sectors and
 * complete the whole request right now
 */
-   if (blk_noretry_request(rq)  end_io_error(uptodate))
+   if (blk_noretry_request(rq)  error)
nr_bytes = rq-hard_nr_sectors  9;
 
-   if (!blk_fs_request(rq)  end_io_error(uptodate)  !rq-errors)
+   if (!blk_fs_request(rq)  error  !rq-errors)
rq-errors = -EIO;
 
/*
@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t
HWGROUP(drive)-hwif-ide_dma_on(drive);
}
 
-   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-   add_disk_randomness(rq-rq_disk);
-   if (dequeue) {
-   if (!list_empty(rq-queuelist))
-   blkdev_dequeue_request(rq);
+   if (!__blk_end_request(rq, error, nr_bytes)) {
+   if (dequeue)
HWGROUP(drive)-rq = NULL;
-   }
-   end_that_request_last(rq, uptodate);
ret = 0;
}
 
@@ -292,9 +291,9 @@ static void ide_complete_pm_request (ide
drive-blocked = 0;
blk_start_queue(drive-queue);
}
-   blkdev_dequeue_request(rq);
HWGROUP(drive)-rq = NULL;
-   end_that_request_last(rq, 1);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
 }
 
@@ -391,10 +390,10 @@ void ide_end_drive_cmd (ide_drive_t *dri
}
 
spin_lock_irqsave(ide_lock, flags);
-   blkdev_dequeue_request(rq);
HWGROUP(drive)-rq = NULL;
rq-errors = err;
-   end_that_request_last(rq, !rq-errors);
+   if (__blk_end_request(rq, (rq-errors ? -EIO : 0), 0))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 24/30] blk_end_request: add callback feature (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch adds a variant of the interface, blk_end_request_callback(),
which has driver callback feature.

Drivers may need to do special works between end_that_request_first()
and end_that_request_last().
For such drivers, blk_end_request_callback() allows it to pass
a callback function which is called between end_that_request_first()
and end_that_request_last().

This interface is only for fallback of other blk_end_request interfaces.
Drivers should avoid their tricky behaviors and use other interfaces
as much as possible.

Currently, only one driver, ide-cd, needs this interface.
So this interface should/will be removed, after the driver removes
such tricky behaviors.

o ide-cd (cdrom_newpc_intr())
  In PIO mode, cdrom_newpc_intr() needs to defer end_that_request_last()
  until the device clears DRQ_STAT and raises an interrupt after
  end_that_request_first().
  So end_that_request_first() and end_that_request_last() are called
  separately in cdrom_newpc_intr().

  This means blk_end_request_callback() has to return without
  completing request even if no leftover in the request.
  To satisfy the requirement, callback function has return value
  so that drivers can tell blk_end_request_callback() to return
  without completing request.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   72 -
 include/linux/blkdev.h |2 +
 2 files changed, 68 insertions(+), 6 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3803,10 +3803,14 @@ static void complete_request(struct requ
 }
 
 /**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq:   the request being processed
- * @error:0 for success, < 0 for error
- * @nr_bytes: number of bytes to complete
+ * blk_end_io - Generic end_io function to complete a request.
+ * @rq:   the request being processed
+ * @error:0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ * @drv_callback: function called between completion of bios in the request
+ *and completion of the request.
+ *If the callback returns non 0, this helper returns without
+ *completion of the request.
  *
  * Description:
  * Ends I/O on a number of bytes attached to @rq.
@@ -3814,9 +3818,10 @@ static void complete_request(struct requ
  *
  * Return:
  * 0 - we are done with this request
- * 1 - still buffers pending for this request
+ * 1 - this request is not freed yet, it still has pending buffers.
  **/
-int blk_end_request(struct request *rq, int error, int nr_bytes)
+static int blk_end_io(struct request *rq, int error, int nr_bytes,
+ int (drv_callback)(struct request *))
 {
struct request_queue *q = rq-q;
unsigned long flags = 0UL;
@@ -3833,6 +3838,10 @@ int blk_end_request(struct request *rq, 
return 1;
}
 
+   /* Special feature for tricky drivers */
+   if (drv_callback  drv_callback(rq))
+   return 1;
+
add_disk_randomness(rq-rq_disk);
 
spin_lock_irqsave(q-queue_lock, flags);
@@ -3841,6 +3850,25 @@ int blk_end_request(struct request *rq, 
 
return 0;
 }
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:   the request being processed
+ * @error:0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ * 0 - we are done with this request
+ * 1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+   return blk_end_io(rq, error, nr_bytes, NULL);
+}
 EXPORT_SYMBOL_GPL(blk_end_request);
 
 /**
@@ -3879,6 +3907,38 @@ int __blk_end_request(struct request *rq
 }
 EXPORT_SYMBOL_GPL(__blk_end_request);
 
+/**
+ * blk_end_request_callback - Special helper function for tricky drivers
+ * @rq:   the request being processed
+ * @error:0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ * @drv_callback: function called between completion of bios in the request
+ *and completion of the request.
+ *If the callback returns non 0, this helper returns without
+ *completion of the request.
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * This special helper function is used only for existing tricky drivers.
+ * (e.g. cdrom_newpc_intr() of ide-cd)
+ * This interface will be removed when

[PATCH 22/30] blk_end_request: changing cpqarray (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts cpqarray to use blk_end_request interfaces.
Related 'ok' arguments are converted to 'error'.

cpqarray is a little bit different from normal drivers.
cpqarray directly calls bio_endio() and disk_stat_add()
when completing request.  But those can be replaced with
__end_that_request_first().
After the replacement, request completion procedures of
those drivers become like the following:
o end_that_request_first()
o add_disk_randomness()
o end_that_request_last()
This can be converted to __blk_end_request() by following
the rule (b) mentioned in the patch subject
[PATCH 01/30] blk_end_request: add new request completion interface.

Cc: Mike Miller [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/cpqarray.c |   36 +++-
 1 files changed, 7 insertions(+), 29 deletions(-)

Index: 2.6.24-rc4/drivers/block/cpqarray.c
===
--- 2.6.24-rc4.orig/drivers/block/cpqarray.c
+++ 2.6.24-rc4/drivers/block/cpqarray.c
@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_buffers(struct bio *bio, int ok);
 static inline void complete_command(cmdlist_t *cmd, int timeout);
 
 static irqreturn_t do_ida_intr(int irq, void *dev_id);
@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
}
 }
 
-static inline void complete_buffers(struct bio *bio, int ok)
-{
-   struct bio *xbh;
-
-   while (bio) {
-   xbh = bio-bi_next;
-   bio-bi_next = NULL;
-   
-   bio_endio(bio, ok ? 0 : -EIO);
-
-   bio = xbh;
-   }
-}
 /*
  * Mark all buffers that cmd was responsible for
  */
 static inline void complete_command(cmdlist_t *cmd, int timeout)
 {
struct request *rq = cmd-rq;
-   int ok=1;
+   int error = 0;
int i, ddir;
 
if (cmd-req.hdr.rcode  RCODE_NONFATAL 
@@ -1011,16 +997,17 @@ static inline void complete_command(cmdl
if (cmd-req.hdr.rcode  RCODE_FATAL) {
printk(KERN_WARNING Fatal error on ida/c%dd%d\n,
cmd-ctlr, cmd-hdr.unit);
-   ok = 0;
+   error = -EIO;
}
if (cmd-req.hdr.rcode  RCODE_INVREQ) {
printk(KERN_WARNING Invalid request on 
ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n,
cmd-ctlr, cmd-hdr.unit, cmd-req.hdr.cmd,
cmd-req.hdr.blk, cmd-req.hdr.blk_cnt,
cmd-req.hdr.sg_cnt, cmd-req.hdr.rcode);
-   ok = 0; 
+   error = -EIO;
}
-   if (timeout) ok = 0;
+   if (timeout)
+   error = -EIO;
/* unmap the DMA mapping for all the scatter gather elements */
if (cmd-req.hdr.cmd == IDA_READ)
ddir = PCI_DMA_FROMDEVICE;
@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdl
 pci_unmap_page(hba[cmd-ctlr]-pci_dev, cmd-req.sg[i].addr,
cmd-req.sg[i].size, ddir);
 
-   complete_buffers(rq-bio, ok);
-
-   if (blk_fs_request(rq)) {
-   const int rw = rq_data_dir(rq);
-
-   disk_stat_add(rq-rq_disk, sectors[rw], rq-nr_sectors);
-   }
-
-   add_disk_randomness(rq-rq_disk);
-
DBGPX(printk(Done with %p\n, rq););
-   end_that_request_last(rq, ok ? 1 : -EIO);
+   if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
+   BUG();
 }
 
 /*
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 25/30] blk_end_request: changing ide-cd (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts ide-cd (cdrom_newpc_intr()) to use blk_end_request
interfaces.  Related 'uptodate' arguments are converted to 'error'.

In PIO mode, ide-cd (cdrom_newpc_intr()) needs to defer
end_that_request_last() until the device clears DRQ_STAT and raises
an interrupt after end_that_request_first().
So blk_end_request() has to return without completing request
even if no leftover in the request.

ide-cd uses blk_end_request_callback() and a dummy callback function,
which just returns value '1', to tell blk_end_request_callback()
about that.

Cc: Bartlomiej Zolnierkiewicz [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/ide/ide-cd.c |   49 +++--
 1 files changed, 35 insertions(+), 14 deletions(-)

Index: 2.6.24-rc4/drivers/ide/ide-cd.c
===
--- 2.6.24-rc4.orig/drivers/ide/ide-cd.c
+++ 2.6.24-rc4/drivers/ide/ide-cd.c
@@ -1650,6 +1650,17 @@ static int cdrom_write_check_ireason(ide
return 1;
 }
 
+/*
+ * Called from blk_end_request_callback() after the data of the request
+ * is completed and before the request is completed.
+ * By returning value '1', blk_end_request_callback() returns immediately
+ * without completing the request.
+ */
+static int cdrom_newpc_intr_dummy_cb(struct request *rq)
+{
+   return 1;
+}
+
 typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
 
 /*
@@ -1688,9 +1699,13 @@ static ide_startstop_t cdrom_newpc_intr(
return ide_error(drive, dma error, stat);
}
 
-   end_that_request_chunk(rq, 1, rq-data_len);
-   rq-data_len = 0;
-   goto end_request;
+   spin_lock_irqsave(ide_lock, flags);
+   if (__blk_end_request(rq, 0, rq-data_len))
+   BUG();
+   HWGROUP(drive)-rq = NULL;
+   spin_unlock_irqrestore(ide_lock, flags);
+
+   return ide_stopped;
}
 
/*
@@ -1708,8 +1723,15 @@ static ide_startstop_t cdrom_newpc_intr(
/*
 * If DRQ is clear, the command has completed.
 */
-   if ((stat  DRQ_STAT) == 0)
-   goto end_request;
+   if ((stat  DRQ_STAT) == 0) {
+   spin_lock_irqsave(ide_lock, flags);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
+   HWGROUP(drive)-rq = NULL;
+   spin_unlock_irqrestore(ide_lock, flags);
+
+   return ide_stopped;
+   }
 
/*
 * check which way to transfer data
@@ -1762,7 +1784,14 @@ static ide_startstop_t cdrom_newpc_intr(
rq-data_len -= blen;
 
if (rq-bio)
-   end_that_request_chunk(rq, 1, blen);
+   /*
+* The request can't be completed until DRQ is cleared.
+* So complete the data, but don't complete the request
+* using the dummy function for the callback feature
+* of blk_end_request_callback().
+*/
+   blk_end_request_callback(rq, 0, blen,
+cdrom_newpc_intr_dummy_cb);
else
rq-data += blen;
}
@@ -1783,14 +1812,6 @@ static ide_startstop_t cdrom_newpc_intr(
 
ide_set_handler(drive, cdrom_newpc_intr, rq-timeout, NULL);
return ide_started;
-
-end_request:
-   spin_lock_irqsave(ide_lock, flags);
-   blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 1);
-   HWGROUP(drive)-rq = NULL;
-   spin_unlock_irqrestore(ide_lock, flags);
-   return ide_stopped;
 }
 
 static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 26/30] blk_end_request: add bidi completion interface (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch adds a variant of the interface, blk_end_bidi_request(),
which completes a bidi request.

Bidi request must be completed as a whole, both rq and rq-next_rq
at once.  So the interface has 2 arguments for completion size.

As for -end_io, only rq-end_io is called (rq-next_rq-end_io is not
called).  So if special completion handling is needed, the handler
must be set to rq-end_io.
And the handler must take care of freeing next_rq too, since
the interface doesn't care of it if rq-end_io is not NULL.

Cc: Boaz Harrosh [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   40 +++-
 include/linux/blkdev.h |2 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3799,6 +3799,9 @@ static void complete_request(struct requ
if (blk_queued_rq(rq))
blkdev_dequeue_request(rq);
 
+   if (blk_bidi_rq(rq)  !rq-end_io)
+   __blk_put_request(rq-next_rq-q, rq-next_rq);
+
end_that_request_last(rq, uptodate);
 }
 
@@ -3806,14 +3809,15 @@ static void complete_request(struct requ
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:   the request being processed
- * @error:0 for success, < 0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes:   number of bytes to complete @rq-next_rq
  * @drv_callback: function called between completion of bios in the request
  *and completion of the request.
  *If the callback returns non 0, this helper returns without
  *completion of the request.
  *
  * Description:
- * Ends I/O on a number of bytes attached to @rq.
+ * Ends I/O on a number of bytes attached to @rq and @rq-next_rq.
  * If @rq has leftover, sets it up for the next range of segments.
  *
  * Return:
@@ -3821,7 +3825,7 @@ static void complete_request(struct requ
  * 1 - this request is not freed yet, it still has pending buffers.
  **/
 static int blk_end_io(struct request *rq, int error, int nr_bytes,
- int (drv_callback)(struct request *))
+ int bidi_bytes, int (drv_callback)(struct request *))
 {
struct request_queue *q = rq-q;
unsigned long flags = 0UL;
@@ -3836,6 +3840,11 @@ static int blk_end_io(struct request *rq
if (blk_fs_request(rq) || blk_pc_request(rq)) {
if (__end_that_request_first(rq, uptodate, nr_bytes))
return 1;
+
+   /* Bidi request must be completed as a whole */
+   if (blk_bidi_rq(rq) 
+   __end_that_request_first(rq-next_rq, uptodate, bidi_bytes))
+   return 1;
}
 
/* Special feature for tricky drivers */
@@ -3867,7 +3876,7 @@ static int blk_end_io(struct request *rq
  **/
 int blk_end_request(struct request *rq, int error, int nr_bytes)
 {
-   return blk_end_io(rq, error, nr_bytes, NULL);
+   return blk_end_io(rq, error, nr_bytes, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(blk_end_request);
 
@@ -3908,6 +3917,27 @@ int __blk_end_request(struct request *rq
 EXPORT_SYMBOL_GPL(__blk_end_request);
 
 /**
+ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
+ * @rq: the bidi request being processed
+ * @error:  0 for success,  0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq-next_rq
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq and @rq-next_rq.
+ *
+ * Return:
+ * 0 - we are done with this request
+ * 1 - still buffers pending for this request
+ **/
+int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+int bidi_bytes)
+{
+   return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+}
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+
+/**
  * blk_end_request_callback - Special helper function for tricky drivers
  * @rq:   the request being processed
  * @error:0 for success,  0 for error
@@ -3935,7 +3965,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
 int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
 int (drv_callback)(struct request *))
 {
-   return blk_end_io(rq, error, nr_bytes, drv_callback);
+   return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
 }
 EXPORT_SYMBOL_GPL(blk_end_request_callback);
 
Index: 2.6.24-rc4/include/linux/blkdev.h
===
--- 2.6.24-rc4.orig/include/linux/blkdev.h
+++ 2.6.24-rc4/include/linux/blkdev.h
@@ -730,6 +730,8 @@ static inline void blk_run_address_space

[PATCH 27/30] blk_end_request: changing scsi (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts scsi mid-layer to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

As a result, the interface of internal function, scsi_end_request(),
is changed.

Cc: James Bottomley [EMAIL PROTECTED]
Cc: Boaz Harrosh [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/scsi/scsi_lib.c |   31 +++
 1 files changed, 11 insertions(+), 20 deletions(-)

Index: 2.6.24-rc4/drivers/scsi/scsi_lib.c
===
--- 2.6.24-rc4.orig/drivers/scsi/scsi_lib.c
+++ 2.6.24-rc4/drivers/scsi/scsi_lib.c
@@ -632,7 +632,7 @@ void scsi_run_host_queues(struct Scsi_Ho
  * of upper level post-processing and scsi_io_completion).
  *
  * Arguments:   cmd - command that is complete.
- *  uptodate - 1 if I/O indicates success, <= 0 for I/O error.
+ *  error    - 0 if I/O indicates success, < 0 for I/O error.
  *  bytes- number of bytes of completed I/O
  * requeue  - indicates whether we should requeue leftovers.
  *
@@ -647,26 +647,25 @@ void scsi_run_host_queues(struct Scsi_Ho
  * at some point during this call.
  * Notes:  If cmd was requeued, upon return it will be a stale pointer.
  */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
+static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
  int bytes, int requeue)
 {
struct request_queue *q = cmd-device-request_queue;
struct request *req = cmd-request;
-   unsigned long flags;
 
/*
 * If there are blocks left over at the end, set up the command
 * to queue the remainder of them.
 */
-   if (end_that_request_chunk(req, uptodate, bytes)) {
+   if (blk_end_request(req, error, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);
 
		if (blk_pc_request(req))
			leftover = req->data_len;
 
		/* kill remainder if no retrys */
-		if (!uptodate && blk_noretry_request(req))
-			end_that_request_chunk(req, 0, leftover);
+		if (error && blk_noretry_request(req))
+			blk_end_request(req, error, leftover);
else {
if (requeue) {
/*
@@ -681,14 +680,6 @@ static struct scsi_cmnd *scsi_end_reques
}
}
 
-   add_disk_randomness(req-rq_disk);
-
-   spin_lock_irqsave(q-queue_lock, flags);
-   if (blk_rq_tagged(req))
-   blk_queue_end_tag(q, req);
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(q-queue_lock, flags);
-
/*
 * This will goose the queue request function at the end, so we don't
 * need to worry about launching another command.
@@ -985,7 +976,7 @@ void scsi_io_completion(struct scsi_cmnd
 * are leftovers and there is some kind of error
 * (result != 0), retry the rest.
 */
-   if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
+   if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
return;
 
/* good_bytes = 0, or (inclusive) there were leftovers and
@@ -999,7 +990,7 @@ void scsi_io_completion(struct scsi_cmnd
 * and quietly refuse further access.
 */
cmd-device-changed = 1;
-   scsi_end_request(cmd, 0, this_count, 1);
+   scsi_end_request(cmd, -EIO, this_count, 1);
return;
} else {
/* Must have been a power glitch, or a
@@ -1031,7 +1022,7 @@ void scsi_io_completion(struct scsi_cmnd
scsi_requeue_command(q, cmd);
return;
} else {
-   scsi_end_request(cmd, 0, this_count, 1);
+   scsi_end_request(cmd, -EIO, this_count, 1);
return;
}
break;
@@ -1059,7 +1050,7 @@ void scsi_io_completion(struct scsi_cmnd
 Device not ready,
 sshdr);
 
-   scsi_end_request(cmd, 0, this_count, 1);
+   scsi_end_request(cmd, -EIO, this_count, 1);
return;
case VOLUME_OVERFLOW:
if (!(req-cmd_flags  REQ_QUIET)) {
@@ -1069,7 +1060,7 @@ void scsi_io_completion(struct scsi_cmnd
scsi_print_sense(, cmd

[PATCH 28/30] blk_end_request: remove/unexport end_that_request_* (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch removes the following functions:
  o end_that_request_first()
  o end_that_request_chunk()
and stops exporting the functions below:
  o end_that_request_last()

Cc: Boaz Harrosh [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   61 -
 include/linux/blkdev.h |   13 --
 2 files changed, 20 insertions(+), 54 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3410,6 +3410,20 @@ static void blk_recalc_rq_sectors(struct
}
 }
 
+/**
+ * __end_that_request_first - end I/O on a request
+ * @req:  the request being processed
+ * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @req, and sets it up
+ * for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ * 0 - we are done with this request, call end_that_request_last()
+ * 1 - still buffers pending for this request
+ **/
 static int __end_that_request_first(struct request *req, int uptodate,
int nr_bytes)
 {
@@ -3526,49 +3540,6 @@ static int __end_that_request_first(stru
return 1;
 }
 
-/**
- * end_that_request_first - end I/O on a request
- * @req:  the request being processed
- * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- * Ends I/O on a number of sectors attached to @req, and sets it up
- * for the next range of segments (if any) in the cluster.
- *
- * Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
-{
-   return __end_that_request_first(req, uptodate, nr_sectors  9);
-}
-
-EXPORT_SYMBOL(end_that_request_first);
-
-/**
- * end_that_request_chunk - end I/O on a request
- * @req:  the request being processed
- * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @req, and sets it up
- * for the next range of segments (if any). Like end_that_request_first(),
- * but deals with bytes instead of sectors.
- *
- * Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
- **/
-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
-{
-   return __end_that_request_first(req, uptodate, nr_bytes);
-}
-
-EXPORT_SYMBOL(end_that_request_chunk);
-
 /*
  * splice the completion data to a local structure and hand off to
  * process_completion_queue() to complete the requests
@@ -3648,7 +3619,7 @@ EXPORT_SYMBOL(blk_complete_request);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req, int uptodate)
+static void end_that_request_last(struct request *req, int uptodate)
 {
struct gendisk *disk = req-rq_disk;
int error;
@@ -3683,8 +3654,6 @@ void end_that_request_last(struct reques
__blk_put_request(req-q, req);
 }
 
-EXPORT_SYMBOL(end_that_request_last);
-
 static inline void __end_request(struct request *rq, int uptodate,
 unsigned int nr_bytes)
 {
Index: 2.6.24-rc4/include/linux/blkdev.h
===
--- 2.6.24-rc4.orig/include/linux/blkdev.h
+++ 2.6.24-rc4/include/linux/blkdev.h
@@ -720,21 +720,18 @@ static inline void blk_run_address_space
 }
 
 /*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
  */
 extern int blk_end_request(struct request *rq, int error, int nr_bytes);
 extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
 extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
int bidi_bytes);
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
 extern

[PATCH 30/30] blk_end_request: cleanup of request completion (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch merges complete_request() into end_that_request_last()
for cleanup.

complete_request() was introduced by earlier part of this patch-set,
not to break the existing users of end_that_request_last().

Since all users are converted to blk_end_request interfaces and
end_that_request_last() is no longer exported, the code can be
merged to end_that_request_last().

Cc: Boaz Harrosh [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c |   31 ++-
 1 files changed, 14 insertions(+), 17 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3616,6 +3616,12 @@ static void end_that_request_last(struct
 {
struct gendisk *disk = req-rq_disk;
 
+   if (blk_rq_tagged(req))
+   blk_queue_end_tag(req->q, req);
+
+   if (blk_queued_rq(req))
+   blkdev_dequeue_request(req);
+
	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();
 
@@ -3633,10 +3639,15 @@ static void end_that_request_last(struct
		disk_round_stats(disk);
		disk->in_flight--;
	}
+
	if (req->end_io)
		req->end_io(req, error);
-	else
+	else {
+		if (blk_bidi_rq(req))
+			__blk_put_request(req->next_rq->q, req->next_rq);
+
		__blk_put_request(req->q, req);
+	}
 }
 
 static inline void __end_request(struct request *rq, int uptodate,
@@ -3737,20 +3748,6 @@ void end_request(struct request *req, in
 }
 EXPORT_SYMBOL(end_request);
 
-static void complete_request(struct request *rq, int error)
-{
-   if (blk_rq_tagged(rq))
-   blk_queue_end_tag(rq->q, rq);
-
-   if (blk_queued_rq(rq))
-   blkdev_dequeue_request(rq);
-
-   if (blk_bidi_rq(rq) && !rq->end_io)
-   __blk_put_request(rq->next_rq->q, rq->next_rq);
-
-   end_that_request_last(rq, error);
-}
-
 /**
  * blk_end_io - Generic end_io function to complete a request.
  * @rq:   the request being processed
@@ -3793,7 +3790,7 @@ static int blk_end_io(struct request *rq
	add_disk_randomness(rq->rq_disk);
 
	spin_lock_irqsave(q->queue_lock, flags);
-	complete_request(rq, error);
+	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
 
return 0;
@@ -3841,7 +3838,7 @@ int __blk_end_request(struct request *rq
 
	add_disk_randomness(rq->rq_disk);
 
-   complete_request(rq, error);
+   end_that_request_last(rq, error);
 
return 0;
 }
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 29/30] blk_end_request: cleanup 'uptodate' related code (take 4)

2007-12-11 Thread Kiyoshi Ueda
This patch converts 'uptodate' arguments of no longer exported
interfaces, end_that_request_first/last, to 'error', and removes
internal conversions for it in blk_end_request interfaces.

Also, this patch removes no longer needed end_io_error().

Cc: Boaz Harrosh [EMAIL PROTECTED]
Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   56 +++--
 include/linux/blkdev.h |8 ---
 2 files changed, 9 insertions(+), 55 deletions(-)

Index: 2.6.24-rc4/block/ll_rw_blk.c
===
--- 2.6.24-rc4.orig/block/ll_rw_blk.c
+++ 2.6.24-rc4/block/ll_rw_blk.c
@@ -3413,7 +3413,7 @@ static void blk_recalc_rq_sectors(struct
 /**
  * __end_that_request_first - end I/O on a request
  * @req:  the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @error:0 for success, < 0 for error
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -3424,29 +3424,22 @@ static void blk_recalc_rq_sectors(struct
  * 0 - we are done with this request, call end_that_request_last()
  * 1 - still buffers pending for this request
  **/
-static int __end_that_request_first(struct request *req, int uptodate,
+static int __end_that_request_first(struct request *req, int error,
int nr_bytes)
 {
-   int total_bytes, bio_nbytes, error, next_idx = 0;
+   int total_bytes, bio_nbytes, next_idx = 0;
struct bio *bio;
 
	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 
/*
-* extend uptodate bool to allow  0 value to be direct io error
-*/
-   error = 0;
-   if (end_io_error(uptodate))
-   error = !uptodate ? -EIO : uptodate;
-
-   /*
 * for a REQ_BLOCK_PC request, we want to carry any eventual
 * sense key with us all the way through
 */
if (!blk_pc_request(req))
	req->errors = 0;
 
-   if (!uptodate) {
+   if (error) {
		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
			printk("end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
@@ -3619,17 +3612,9 @@ EXPORT_SYMBOL(blk_complete_request);
 /*
  * queue lock must be held
  */
-static void end_that_request_last(struct request *req, int uptodate)
+static void end_that_request_last(struct request *req, int error)
 {
struct gendisk *disk = req-rq_disk;
-   int error;
-
-   /*
-* extend uptodate bool to allow  0 value to be direct io error
-*/
-   error = 0;
-   if (end_io_error(uptodate))
-   error = !uptodate ? -EIO : uptodate;
 
if (unlikely(laptop_mode)  blk_fs_request(req))
laptop_io_completion();
@@ -3754,14 +3739,6 @@ EXPORT_SYMBOL(end_request);
 
 static void complete_request(struct request *rq, int error)
 {
-   /*
-* REMOVEME: This conversion is transitional and will be removed
-*   when old end_that_request_* are unexported.
-*/
-   int uptodate = 1;
-   if (error)
-   uptodate = (error == -EIO) ? 0 : error;
-
if (blk_rq_tagged(rq))
blk_queue_end_tag(rq-q, rq);
 
@@ -3771,7 +3748,7 @@ static void complete_request(struct requ
if (blk_bidi_rq(rq)  !rq-end_io)
__blk_put_request(rq-next_rq-q, rq-next_rq);
 
-   end_that_request_last(rq, uptodate);
+   end_that_request_last(rq, error);
 }
 
 /**
@@ -3798,21 +3775,14 @@ static int blk_end_io(struct request *rq
 {
struct request_queue *q = rq-q;
unsigned long flags = 0UL;
-   /*
-* REMOVEME: This conversion is transitional and will be removed
-*   when old end_that_request_* are unexported.
-*/
-   int uptodate = 1;
-   if (error)
-   uptodate = (error == -EIO) ? 0 : error;
 
if (blk_fs_request(rq) || blk_pc_request(rq)) {
-   if (__end_that_request_first(rq, uptodate, nr_bytes))
+   if (__end_that_request_first(rq, error, nr_bytes))
return 1;
 
/* Bidi request must be completed as a whole */
		if (blk_bidi_rq(rq) &&
-		    __end_that_request_first(rq->next_rq, uptodate, bidi_bytes))
+		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
return 1;
}
 
@@ -3864,16 +3834,8 @@ EXPORT_SYMBOL_GPL(blk_end_request);
  **/
 int __blk_end_request(struct request *rq, int error, int nr_bytes)
 {
-   /*
-* REMOVEME: This conversion is transitional and will be removed
-*   when old end_that_request_* are unexported.
-*/
-   int uptodate = 1;
-   if (error)
-   uptodate = (error == -EIO) ? 0 : error

Re: [PATCH 27/28] blk_end_request: changing scsi mid-layer for bidi (take 3)

2007-12-06 Thread Kiyoshi Ueda
;
  }
   
  /* Special feature for tricky drivers */

 No I don't like it. The only client left for blk_end_request_callback()
 is bidi,

ide-cd (cdrom_newpc_intr) is another client.
So I can't drop blk_end_request_callback() even if bidi doesn't use it.

 and for bidi we can do a much simpler thing. listed below 
 are some possible solutions.
 
 1. Take extra parm to blk_end_request like
 int blk_end_request(struct request *rq, int error, int nr_bytes, int 
 bidi_bytes)
 
 if the bidi_bytes is not zero than req-next_req is freed like above.

OK, I agree with the interface basically.
I didn't like to have the 'bidi_bytes', since I considered it was
a driver specific argument and I prefer to avoid driver specific
interface like blk_end_request_callback() as much as possible.
However, it can be considered as block-layer generic, since next_rq is
a member of struct request (and other drivers might have bidi support
although scsi is only one user right now).

So I made a tentative patch below, which adds a variant,
blk_end_bidi_request().
Please take a look.


 And please reconsider that double implementation. (triple above, but I hope
 now I have convinced you to drop one). Jense please ?

I have a plan to remove that duplication using the internal-with-flags
implementation something like the patch (blk_end_io) below.

Jens,
If you agree with passing a 'needlock' flag, which is used to decide
whether to get queue's lock, I can remove the last duplication of
__blk_end_request() too.  Do you still disagree?

Thanks,
Kiyoshi Ueda


Index: 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
===
--- 2.6.24-rc3-mm2.orig/drivers/scsi/scsi_lib.c
+++ 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
@@ -629,28 +629,6 @@ void scsi_run_host_queues(struct Scsi_Ho
scsi_run_queue(sdev-request_queue);
 }
 
-static void scsi_finalize_request(struct scsi_cmnd *cmd, int uptodate)
-{
-   struct request_queue *q = cmd-device-request_queue;
-   struct request *req = cmd-request;
-   unsigned long flags;
-
-   add_disk_randomness(req-rq_disk);
-
-   spin_lock_irqsave(q-queue_lock, flags);
-   if (blk_rq_tagged(req))
-   blk_queue_end_tag(q, req);
-
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(q-queue_lock, flags);
-
-   /*
-* This will goose the queue request function at the end, so we don't
-* need to worry about launching another command.
-*/
-   scsi_next_command(cmd);
-}
-
 /*
  * Function:scsi_end_request()
  *
@@ -930,23 +908,25 @@ EXPORT_SYMBOL(scsi_release_buffers);
 void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 {
	struct request *req = cmd->request;
+	unsigned int dlen = req->data_len;
+	unsigned int next_dlen = req->next_rq->data_len;
 
-	end_that_request_chunk(req, 1, req->data_len);
	req->data_len = scsi_out(cmd)->resid;
-
-	end_that_request_chunk(req->next_rq, 1, req->next_rq->data_len);
	req->next_rq->data_len = scsi_in(cmd)->resid;
 
-   scsi_release_buffers(cmd);
-
/*
 *FIXME: If ll_rw_blk.c is changed to also put_request(req-next_rq)
-*   in end_that_request_last() then this WARN_ON must be removed.
+*   in blk_end_bidi_request() then this WARN_ON must be removed.
 *   for now, upper-driver must have registered an end_io.
 */
WARN_ON(!req-end_io);
 
-   scsi_finalize_request(cmd, 1);
+   if (blk_end_bidi_request(req, 1, dlen, next_dlen))
+   /* req has not been completed */
+   BUG();
+
+   scsi_release_buffers(cmd);
+   scsi_next_command(cmd);
 }
 
 /*
Index: 2.6.24-rc3-mm2/block/ll_rw_blk.c
===
--- 2.6.24-rc3-mm2.orig/block/ll_rw_blk.c
+++ 2.6.24-rc3-mm2/block/ll_rw_blk.c
@@ -3792,24 +3792,17 @@ static void complete_request(struct requ
	if (!list_empty(&rq->queuelist))
		blkdev_dequeue_request(rq);
 
+	if (blk_bidi_rq(rq) && !rq->end_io)
+		__blk_put_request(rq->next_rq->q, rq->next_rq);
+
end_that_request_last(rq, uptodate);
 }
 
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq:   the request being processed
- * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- * 0 - we are done with this request
- * 1 - still buffers pending for this request
- **/
-int blk_end_request(struct request *rq, int uptodate, int nr_bytes)
+/*
+ * Internal function
+ */
+static int blk_end_io(struct request *rq, int uptodate, int nr_bytes,
+ int bidi_bytes, int (drv_callback)(struct request *))
 {
struct request_queue *q = rq-q

Re: [PATCH 27/28] blk_end_request: changing scsi mid-layer for bidi (take 3)

2007-12-05 Thread Kiyoshi Ueda
Hi Boaz,

On Tue, 04 Dec 2007 15:39:12 +0200, Boaz Harrosh [EMAIL PROTECTED] wrote:
 On Sat, Dec 01 2007 at 1:35 +0200, Kiyoshi Ueda [EMAIL PROTECTED] wrote:
  This patch converts bidi of scsi mid-layer to use blk_end_request().
  
  rq-next_rq represents a pair of bidi requests.
  (There are no other use of 'next_rq' of struct request.)
  For both requests in the pair, end_that_request_chunk() should be
  called before end_that_request_last() is called for one of them.
  Since the calls to end_that_request_first()/chunk() and
  end_that_request_last() are packaged into blk_end_request(),
  the handling of next_rq completion has to be moved into
  blk_end_request(), too.
  
  Bidi sets its specific value to rq-data_len before the request is
  completed so that upper-layer can read it.
  This setting must be between end_that_request_chunk() and
  end_that_request_last(), because rq-data_len may be used
  in end_that_request_chunk() by blk_trace and so on.
  To satisfy the requirement, use blk_end_request_callback() which
  is added in PATCH 25 only for the tricky drivers.
  
  If bidi didn't reuse rq-data_len and added new members to request
  for the specific value, it could set before end_that_request_chunk()
  and use the standard blk_end_request() like below.
  
  void scsi_end_bidi_request(struct scsi_cmnd *cmd)
  {
  struct request *req = cmd-request;
  
  rq-resid = scsi_out(cmd)-resid;
  rq-next_rq-resid = scsi_in(cmd)-resid;
  
  if (blk_end_request(req, 1, req-data_len))
  BUG();
  
  scsi_release_buffers(cmd);
  scsi_next_command(cmd);
  }
...
snip
...

 rq-data_len = scsi_out(cmd)-resid is Not Just a problem of bidi
 it is a General problem of scsi residual handling, and user code.
 
 Even today before any bidi. at scsi_lib.c at scsi_io_completion()
 we do req-data_len = scsi_get_resid(cmd);
 ( or: req-data_len = cmd-resid; depends which version you look)
 And then call scsi_end_request() which calls __end_that_request_first/last
 So it is assumed even today that req-data_len is not touched by
 __end_that_request_first/last unless __end_that_request_first returned
 that there is more work to do and the command is resubmitted in which
 case the resid information is discarded.
 
 So if the regular resid handling is acceptable - Set req-data_len
 before the call to __end_that_request_first/last, or blk_end_request()
 in your case, then here goes your second client of the _callback and
 it can be removed.
 But if it is found that req-data_len is touched and the resid information
 gets lost, than it should be fixed for the common uni-io case, by - for 
 example
 - pass resid to the blk_end_request() function.
 (So in any way the _callback can go)

Thank you for the explanation of scsi's rq-data_len usage.
I see that scsi usually uses rq-data_len for cmd-resid.

I have investigated the possibility of setting data_len before
the call to blk_end_request.
But no matter whether data_len is touched or not, we need a callback
for bidi.  So I would like to go with the current patch.

I explained the reason and some details below.


As far as I can see, rq-data_len is just referenced
by blk_add_trace_rq() in __end_that_request_first(), not modified.
And I don't change any logic around there in the block-layer.
So there shouldn't be any critical problem for scsi residual handing.
(although I'm not sure that scsi expectes cmd-resid to be traced
 by blk_trace.)

Anyway, I see that it is no critical problem for bidi to set cmd-resid
to rq-data_len before blk_end_request() call.
But if I do that, blk_end_request() can't get the next_rq's size
to complete in its code below.

 + /* Bidi request must be completed as a whole */
 + if (blk_bidi_rq(rq) &&
 + __end_that_request_first(rq->next_rq, uptodate,
 +  blk_rq_bytes(rq->next_rq)))
 + return 1;

So I will have to move next_rq completion to bidi and use _callback()
anyway like the following.
-
static int dummy_cb(struct request *rq)
{
return 1;
}

void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
struct request *req = cmd-request;
unsigned int dlen = req-data_len;
unsigned int next_dlen = req-next_rq-data_len;
 
req-data_len = scsi_out(cmd)-resid;
req-next_rq-data_len = scsi_in(cmd)-resid;
 
/* Complete only DATA of next_rq using _callback and dummy function */
if (!blk_end_request_callback(req-next_rq, 1, next_dlen, dummy_cb))
BUG();
 
if (blk_end_request(req, 1, dlen))
BUG();

scsi_release_buffers(cmd);
scsi_next_command(cmd);
}
-

I prefer the current patch rather than the code like above,
since the code calls blk_end_request twice and looks trickier.
So I'd like to leave

Re: [PATCH 24/28] blk_end_request: changing ide normal caller (take 3)

2007-12-04 Thread Kiyoshi Ueda
Hi Bartlomiej,

On Tue, 4 Dec 2007 14:47:00 +0100, Bartlomiej Zolnierkiewicz wrote:
   Hmmm, this seems to change the old behavior (the request should
   be dequeued from the queue only if 'dequeue' variable is set)
   and AFAIR some error handling code (in ide-cd?) depends on the
   old behavior so please revisit this patch.
  
  blk_end_request() takes care of the dequeue like below,
  so I think no problem.  (Please see PATCH 01)
  
   + /* rq-queuelist of dequeued request should be list_empty() */
   + if (!list_empty(rq-queuelist))
   + blkdev_dequeue_request(rq);
  
  In the case of ide-cd,
o 'dequeue' variable is 1 only when the request is still linked
  to the queue (i.e. rq-queuelist is not empty)
o 'dequeue' variable is 0 only when the request has already been
  removed from the queue (i.e. rq-queuelist is empty)
  So blk_end_request() can handle it correctly.
 
 It would be helpful to add the above explanation to a patch description.
 
  If there are any drivers which don't want dequeue the queued request,
  the code above would not work.
  But, as far as I investigated, I have never seen such a requirement
  in device drivers.
  
  Do you think that ide may still gets a problem for the 'dequeue'?
 
 Everything seems to be fine now.
 
 Acked-by: Bartlomiej Zolnierkiewicz [EMAIL PROTECTED]

Thank you for the check.
OK, I'll add the explanation about the 'dequeue' to patch description.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 01/28] blk_end_request: add new request completion interface (take 3)

2007-12-04 Thread Kiyoshi Ueda
Hi Boaz,

On Tue, 04 Dec 2007 15:56:32 +0200, Boaz Harrosh [EMAIL PROTECTED] wrote:
  +int blk_end_request(struct request *rq, int uptodate, int nr_bytes)
  +{
  +   struct request_queue *q = rq->q;
  +   unsigned long flags = 0UL;
  +
  +   if (blk_fs_request(rq) || blk_pc_request(rq)) {
  +   if (__end_that_request_first(rq, uptodate, nr_bytes))
  +   return 1;
  +   }
  +
  +   add_disk_randomness(rq->rq_disk);
  +
  +   spin_lock_irqsave(q->queue_lock, flags);
  +   complete_request(rq, uptodate);
  +   spin_unlock_irqrestore(q->queue_lock, flags);
  +
  +   return 0;
  +}
  +EXPORT_SYMBOL_GPL(blk_end_request);
  +
  +/**
  + * __blk_end_request - Helper function for drivers to complete the request.
  + *
  + * Description:
  + * Must be called with queue lock held unlike blk_end_request().
  + **/
  +int __blk_end_request(struct request *rq, int uptodate, int nr_bytes)
  +{
  +   if (blk_fs_request(rq) || blk_pc_request(rq)) {
  +   if (__end_that_request_first(rq, uptodate, nr_bytes))
  +   return 1;
  +   }
  +
  +   add_disk_randomness(rq-rq_disk);
  +
  +   complete_request(rq, uptodate);
  +
  +   return 0;
  +}
  +EXPORT_SYMBOL_GPL(__blk_end_request);
 
 I don't like it that you have two Identical but slightly different
 implementations  I wish you would do an internal-with-flags
 implementation and then API ones can call the internal one. Or maybe
 just hold the spin_lock just a bit longer and have one call the other.
 To prove my case see how hard it is to add new code like with
 the bidi patch, where you need to add exact same code in 3 places.
 (OK only 2 places actually, if _callback is gone)

As for the internal-with-flags implementation, I once proposed
something like below but it was rejected by Jens.
(http://marc.info/?l=linux-kernelm=118880584720600w=2)
--
static int internal_function(rq, needlock)
{
end_that_request_chunk(rq);

if (needlock)
spin_lock_irqsave();
end_that_request_last(rq);
if (needlock)
spin_unlock_irqrestore();
}

int blk_end_request(rq)
{
return internal_function(rq, 1);
}

int __blk_end_request(rq)
{
return internal_function(rq, 0);
}
--


As for the holding-queue-lock-longer implementation,
end_that_request_chunk() completes bios in the request, and it can
reach the filesystem layer and may take time.
I guess many drivers like scsi are calling end_that_request_chunk()
without the queue's lock for the reason above.

I'll try to remove the duplication again by another patch-set
after blk_end_request interfaces are merged.
So I would like to leave the duplication for now.
Is it acceptable for you?

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 01/28] blk_end_request: add new request completion interface (take 3)

2007-12-04 Thread Kiyoshi Ueda
Hi Boaz and Jens,

On Tue, 04 Dec 2007 15:56:32 +0200, Boaz Harrosh [EMAIL PROTECTED] wrote:
  +/**
  + * blk_end_request - Helper function for drivers to complete the request.
  + * @rq:   the request being processed
  + * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
  + * @nr_bytes: number of bytes to complete
  + *
  + * Description:
  + * Ends I/O on a number of bytes attached to @rq.
  + * If @rq has leftover, sets it up for the next range of segments.
  + *
  + * Return:
  + * 0 - we are done with this request
  + * 1 - still buffers pending for this request
  + **/
  +int blk_end_request(struct request *rq, int uptodate, int nr_bytes)
 
 I always hated that uptodate boolean with possible negative error value.
 I guess it was done for backward compatibility of then users of 
 end_that_request_first(). But since you are introducing a new API then
 this is not the case. Just have regular status int where 0 means ALL_OK
 and negative value means error code. 
 Just my $0.02.

Thank you for the comment.
I think it's quite reasonable.
By doing that, we don't need end_io_error() anymore.


Jens,
What do you think?
If you agree with the interface change above, I would prefer to
separate the patch-set from blk_end_request patch-set like below:
o blk_end_request: remove end_that_request_*
o change interface of 'uptodate' in blk_end_request to 'error'
It makes the purpose of blk_end_request patch-set clear
(and also, each patch of device drivers could be smaller).
But, it doubles your merging work.  So if you would like to get
the changes at once, I'll merge them into blk_end_request patch-set.
 
As for the patch inclusion, do you push the driver changes to Linus
all at once?  Or should I ask each maintainer to take the patch?

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 26/28] blk_end_request: changing ide-cd (take 3)

2007-12-03 Thread Kiyoshi Ueda
Hi Bartlomiej,

On Sat, 1 Dec 2007 23:42:51 +0100, Bartlomiej Zolnierkiewicz [EMAIL 
PROTECTED] wrote:
 On Saturday 01 December 2007, Kiyoshi Ueda wrote:
  This patch converts ide-cd (cdrom_newpc_intr()) to use blk_end_request().
  
  ide-cd (cdrom_newpc_intr()) has some tricky behaviors below which
  need to use blk_end_request_callback().
  Needs to:
1. call post_transform_command() to modify request contents
 
 Seems like post_transform_command() call can be removed (patch below).
 
2. wait completing request until DRQ_STAT is cleared
 
 Would be great if somebody convert cdrom_newpc_intr() to use scatterlists
 also for PIO transfers (ide_pio_sector() in ide-taskfile.c should serve
 as a good starting base to see how to do PIO transfers using scatterlists)
 so we could get rid of partial request completions in cdrom_newpc_intr()
 and just fully complete request when the transfer is done.  Shouldn't be
 difficult but I guess that we can live with blk_end_request_callback() for
 the time being...
 
  after end_that_request_first() and before end_that_request_last().
  
  As for the second one, ide-cd will wait for the interrupt from device.
  So blk_end_request_callback() has to return without completing request
  even if no leftover in the request.
  ide-cd uses a dummy callback function, which just returns value '1',
  to tell blk_end_request_callback() about that.
  
  Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
  Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
 
 [PATCH] ide-cd: remove dead post_transform_command()
 
 post_transform_command() call in cdrom_newpc_intr() has no effect because
 it is done after the request has already been fully completed (rq-bio and
 rq-data are always NULL).  It was verified to be true regardless whether
 INQUIRY command is using DMA or PIO to transfer data (by using modified
 Tejun Heo's test-shortsg.c utility and adding a few printk()-s to ide-cd).
 
 This was uncovered thanks to the blk_end_request: full I/O completion
 handler (take 3) patch series from Kiyoshi Ueda.
 
 Cc: [EMAIL PROTECTED]
 Cc: [EMAIL PROTECTED]
 Cc: Kiyoshi Ueda [EMAIL PROTECTED]
 Cc: Jun'ichi Nomura [EMAIL PROTECTED]
 Cc: Tejun Heo [EMAIL PROTECTED]
 Signed-off-by: Bartlomiej Zolnierkiewicz [EMAIL PROTECTED]
 ---
 Kiyoshi: please rebase your patch on top of this one (I'll send
 it to Linus in the next IDE update), should make your patch a bit
 simpler.
 
 Tejun: you had really good timing with posting test-shortsg.c
 (it saved me some time coding user-space SG_IO tester), thanks!
 
  drivers/ide/ide-cd.c |   28 
  1 file changed, 28 deletions(-)
 
 Index: b/drivers/ide/ide-cd.c
 ===
 --- a/drivers/ide/ide-cd.c
 +++ b/drivers/ide/ide-cd.c
 @@ -1650,31 +1650,6 @@ static int cdrom_write_check_ireason(ide
   return 1;
  }
  
 -static void post_transform_command(struct request *req)
 -{
 - u8 *c = req-cmd;
 - char *ibuf;
 -
 - if (!blk_pc_request(req))
 - return;
 -
 - if (req-bio)
 - ibuf = bio_data(req-bio);
 - else
 - ibuf = req-data;
 -
 - if (!ibuf)
 - return;
 -
 - /*
 -  * set ansi-revision and response data as atapi
 -  */
 - if (c[0] == GPCMD_INQUIRY) {
 - ibuf[2] |= 2;
 - ibuf[3] = (ibuf[3]  0xf0) | 2;
 - }
 -}
 -
  typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
  
  /*
 @@ -1810,9 +1785,6 @@ static ide_startstop_t cdrom_newpc_intr(
   return ide_started;
  
  end_request:
 - if (!rq-data_len)
 - post_transform_command(rq);
 -
   spin_lock_irqsave(ide_lock, flags);
   blkdev_dequeue_request(rq);
   end_that_request_last(rq, 1);

Thank you for the comments.
I rebased my patch on top of 2.6.24-rc3-mm2 + the patch to remove
post_transform_command().

As a result, one callback function for DMA mode has been removed.
What do you think about the patch below?



Subject: [PATCH 26/28] blk_end_request: changing ide-cd (take 3)


This patch converts ide-cd (cdrom_newpc_intr()) to use blk_end_request
interfaces.

In PIO mode, ide-cd (cdrom_newpc_intr()) needs to defer
end_that_request_last() until the device clears DRQ_STAT and raises
an interrupt after end_that_request_first().
So blk_end_request() has to return without completing request
even if no leftover in the request.

ide-cd uses blk_end_request_callback() and a dummy callback function,
which just returns value '1', to tell blk_end_request_callback()
about that.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/ide/ide-cd.c |   49 +++--
 1 files changed, 35 insertions(+), 14 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/ide/ide-cd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/ide/ide-cd.c
+++ 2.6.24-rc3-mm2/drivers/ide/ide-cd.c

Re: [PATCH 09/28] blk_end_request: changing ps3disk (take 3)

2007-12-03 Thread Kiyoshi Ueda
Hi Geert,

On Sun, 2 Dec 2007 10:34:56 +0100 (CET), Geert Uytterhoeven [EMAIL PROTECTED] 
wrote:
 On Fri, 30 Nov 2007, Kiyoshi Ueda wrote:
  This patch converts ps3disk to use blk_end_request().
  ^^^
 Patch subject and description are inconsistent with actual change.
 
  Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
  Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
  ---
   drivers/block/ps3disk.c |6 +-
   1 files changed, 1 insertion(+), 5 deletions(-)
  
  Index: 2.6.24-rc3-mm2/drivers/block/ps3disk.c
  ===
  --- 2.6.24-rc3-mm2.orig/drivers/block/ps3disk.c
  +++ 2.6.24-rc3-mm2/drivers/block/ps3disk.c
  @@ -280,11 +280,7 @@ static irqreturn_t ps3disk_interrupt(int
  }
   
  spin_lock(priv-lock);
  -   if (!end_that_request_first(req, uptodate, num_sectors)) {
  -   add_disk_randomness(req-rq_disk);
  -   blkdev_dequeue_request(req);
  -   end_that_request_last(req, uptodate);
  -   }
  +   __blk_end_request(req, uptodate, num_sectors  9);
   ^

Thank you for the comment.
The description meant the blk_end_request family, not the actual
function blk_end_request().  But as you pointed out, it is misleading.
I'll change the description of all related patches.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 24/28] blk_end_request: changing ide normal caller (take 3)

2007-12-03 Thread Kiyoshi Ueda
Hi Bartlomiej,

On Sat, 1 Dec 2007 23:53:05 +0100, Bartlomiej Zolnierkiewicz [EMAIL 
PROTECTED] wrote:
 On Saturday 01 December 2007, Kiyoshi Ueda wrote:
  This patch converts normal parts of ide to use blk_end_request().
  
  Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
  Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
  ---
   drivers/ide/ide-cd.c |6 +++---
   drivers/ide/ide-io.c |   17 ++---
   2 files changed, 9 insertions(+), 14 deletions(-)
 
 [...]
 
  Index: 2.6.24-rc3-mm2/drivers/ide/ide-io.c
  ===
  --- 2.6.24-rc3-mm2.orig/drivers/ide/ide-io.c
  +++ 2.6.24-rc3-mm2/drivers/ide/ide-io.c
  @@ -78,14 +78,9 @@ static int __ide_end_request(ide_drive_t
  ide_dma_on(drive);
  }
   
  -   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
  -   add_disk_randomness(rq-rq_disk);
  -   if (dequeue) {
  -   if (!list_empty(rq-queuelist))
  -   blkdev_dequeue_request(rq);
  +   if (!__blk_end_request(rq, uptodate, nr_bytes)) {
  +   if (dequeue)
  HWGROUP(drive)-rq = NULL;
  -   }
  -   end_that_request_last(rq, uptodate);
  ret = 0;
  }
 
 Hmmm, this seems to change the old behavior (the request should
 be dequeued from the queue only if 'dequeue' variable is set)
 and AFAIR some error handling code (in ide-cd?) depends on the
 old behavior so please revisit this patch.

blk_end_request() takes care of the dequeue like below,
so I think no problem.  (Please see PATCH 01)

 + /* rq-queuelist of dequeued request should be list_empty() */
 + if (!list_empty(rq-queuelist))
 + blkdev_dequeue_request(rq);

In the case of ide-cd,
  o 'dequeue' variable is 1 only when the request is still linked
to the queue (i.e. rq-queuelist is not empty)
  o 'dequeue' variable is 0 only when the request has already been
removed from the queue (i.e. rq-queuelist is empty)
So blk_end_request() can handle it correctly.


If there are any drivers which don't want to dequeue the queued request,
the code above would not work.
But, as far as I investigated, I have never seen such a requirement
in device drivers.

Do you think that ide may still get a problem with the 'dequeue'?

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 03/28] blk_end_request: changing block layer core (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts core parts of block layer to use blk_end_request().

'dequeue' argument was originally introduced for end_dequeued_request(),
where no attempt should be made to dequeue the request as it's already
dequeued.
However, it's not necessary as it can be checked with
list_empty(rq-queuelist).
(Dequeued request has empty list and queued request doesn't.)

As a result of this patch, end_queued_request() and
end_dequeued_request() become identical.  Later patch will merge
and rename them and change users of those functions.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c |   25 ++---
 1 files changed, 10 insertions(+), 15 deletions(-)

Index: 2.6.24-rc3-mm2/block/ll_rw_blk.c
===
--- 2.6.24-rc3-mm2.orig/block/ll_rw_blk.c
+++ 2.6.24-rc3-mm2/block/ll_rw_blk.c
@@ -368,8 +368,8 @@ void blk_ordered_complete_seq(struct req
q-ordseq = 0;
rq = q-orig_bar_rq;
 
-   end_that_request_first(rq, uptodate, rq-hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, uptodate, blk_rq_bytes(rq)))
+   BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +486,9 @@ int blk_do_ordered(struct request_queue 
 * ORDERED_NONE while this request is on it.
 */
blkdev_dequeue_request(rq);
-   end_that_request_first(rq, -EOPNOTSUPP,
-  rq-hard_nr_sectors);
-   end_that_request_last(rq, -EOPNOTSUPP);
+   if (__blk_end_request(rq, -EOPNOTSUPP,
+ blk_rq_bytes(rq)))
+   BUG();
*rqp = NULL;
return 0;
}
@@ -3691,14 +3691,9 @@ void end_that_request_last(struct reques
 EXPORT_SYMBOL(end_that_request_last);
 
 static inline void __end_request(struct request *rq, int uptodate,
-unsigned int nr_bytes, int dequeue)
+unsigned int nr_bytes)
 {
-   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-   if (dequeue)
-   blkdev_dequeue_request(rq);
-   add_disk_randomness(rq-rq_disk);
-   end_that_request_last(rq, uptodate);
-   }
+   __blk_end_request(rq, uptodate, nr_bytes);
 }
 
 /**
@@ -3741,7 +3736,7 @@ EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, blk_rq_bytes(rq), 1);
+   __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3758,7 +3753,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, blk_rq_bytes(rq), 0);
+   __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3784,7 +3779,7 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-   __end_request(req, uptodate, req-hard_cur_sectors  9, 1);
+   __end_request(req, uptodate, req-hard_cur_sectors  9);
 }
 EXPORT_SYMBOL(end_request);
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 26/28] blk_end_request: changing ide-cd (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts ide-cd (cdrom_newpc_intr()) to use blk_end_request().

ide-cd (cdrom_newpc_intr()) has some tricky behaviors below which
need to use blk_end_request_callback().
Needs to:
  1. call post_transform_command() to modify request contents
  2. wait completing request until DRQ_STAT is cleared
after end_that_request_first() and before end_that_request_last().

As for the second one, ide-cd will wait for the interrupt from device.
So blk_end_request_callback() has to return without completing request
even if no leftover in the request.
ide-cd uses a dummy callback function, which just returns value '1',
to tell blk_end_request_callback() about that.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/ide/ide-cd.c |   78 +++
 1 files changed, 61 insertions(+), 17 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/ide/ide-cd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/ide/ide-cd.c
+++ 2.6.24-rc3-mm2/drivers/ide/ide-cd.c
@@ -1669,6 +1669,37 @@ static void post_transform_command(struc
}
 }
 
+/*
+ * Called from blk_end_request_callback() after the data of the request
+ * is completed and before the request is completed.
+ */
+static int cdrom_newpc_intr_dma_cb(struct request *rq)
+{
+   ide_drive_t *drive = rq-q-queuedata;
+   spinlock_t *ide_lock = rq-q-queue_lock;
+   unsigned long flags = 0UL;
+
+   rq-data_len = 0;
+   post_transform_command(rq);
+
+   spin_lock_irqsave(ide_lock, flags);
+   HWGROUP(drive)-rq = NULL;
+   spin_unlock_irqrestore(ide_lock, flags);
+
+   return 0;
+}
+
+/*
+ * Called from blk_end_request_callback() after the data of the request
+ * is completed and before the request is completed.
+ * By returning value '1', blk_end_request_callback() returns immediately
+ * without completing the request.
+ */
+static int cdrom_newpc_intr_dummy_cb(struct request *rq)
+{
+   return 1;
+}
+
 typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
 
 /*
@@ -1707,9 +1738,16 @@ static ide_startstop_t cdrom_newpc_intr(
return ide_error(drive, dma error, stat);
}
 
-   end_that_request_chunk(rq, 1, rq-data_len);
-   rq-data_len = 0;
-   goto end_request;
+   /*
+* post_transform_command() needs to be called after
+* the data of the request is completed, since it may
+* modify the data area of the request.
+* So use the callback special feature of blk_end_request().
+*/
+   if (blk_end_request_callback(rq, 1, rq-data_len,
+cdrom_newpc_intr_dma_cb))
+   BUG();
+   return ide_stopped;
}
 
/*
@@ -1727,8 +1765,18 @@ static ide_startstop_t cdrom_newpc_intr(
/*
 * If DRQ is clear, the command has completed.
 */
-   if ((stat  DRQ_STAT) == 0)
-   goto end_request;
+   if ((stat  DRQ_STAT) == 0) {
+   if (!rq-data_len)
+   post_transform_command(rq);
+
+   spin_lock_irqsave(ide_lock, flags);
+   if (__blk_end_request(rq, 1, 0))
+   BUG();
+   HWGROUP(drive)-rq = NULL;
+   spin_unlock_irqrestore(ide_lock, flags);
+
+   return ide_stopped;
+   }
 
/*
 * check which way to transfer data
@@ -1781,7 +1829,14 @@ static ide_startstop_t cdrom_newpc_intr(
rq-data_len -= blen;
 
if (rq-bio)
-   end_that_request_chunk(rq, 1, blen);
+   /*
+* The request can't be completed until DRQ is cleared.
+* So complete the data, but don't complete the request
+* using the dummy function for the callback feature
+* of blk_end_request().
+*/
+   blk_end_request_callback(rq, 1, blen,
+cdrom_newpc_intr_dummy_cb);
else
rq-data += blen;
}
@@ -1802,17 +1857,6 @@ static ide_startstop_t cdrom_newpc_intr(
 
ide_set_handler(drive, cdrom_newpc_intr, rq-timeout, NULL);
return ide_started;
-
-end_request:
-   if (!rq-data_len)
-   post_transform_command(rq);
-
-   spin_lock_irqsave(ide_lock, flags);
-   blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 1);
-   HWGROUP(drive)-rq = NULL;
-   spin_unlock_irqrestore(ide_lock, flags);
-   return ide_stopped;
 }
 
 static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL

[PATCH 22/28] blk_end_request: changing cciss (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts cciss to use blk_end_request().

cciss is a little bit different from normal drivers.
cciss directly calls bio_endio() and disk_stat_add()
when completing request.  But those can be replaced with
__end_that_request_first().
After the replacement, request completion procedures of
those drivers become like the following:
o end_that_request_first()
o add_disk_randomness()
o end_that_request_last()
This can be converted to blk_end_request() by following
the rule (a) mentioned in the patch subject
[PATCH 01/28] blk_end_request: add new request completion interface.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/cciss.c |   25 +++--
 1 files changed, 3 insertions(+), 22 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/cciss.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/cciss.c
+++ 2.6.24-rc3-mm2/drivers/block/cciss.c
@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *ino
}
 }
 
-static inline void complete_buffers(struct bio *bio, int status)
-{
-   while (bio) {
-   struct bio *xbh = bio-bi_next;
-
-   bio-bi_next = NULL;
-   bio_endio(bio, status ? 0 : -EIO);
-   bio = xbh;
-   }
-}
-
 static void cciss_check_queues(ctlr_info_t *h)
 {
int start_queue = h-next_to_run;
@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct re
pci_unmap_page(h-pdev, temp64.val, cmd-SG[i].Len, ddir);
}
 
-   complete_buffers(rq-bio, (rq-errors == 0));
-
-   if (blk_fs_request(rq)) {
-   const int rw = rq_data_dir(rq);
-
-   disk_stat_add(rq-rq_disk, sectors[rw], rq-nr_sectors);
-   }
-
 #ifdef CCISS_DEBUG
printk(Done with %p\n, rq);
 #endif /* CCISS_DEBUG */
 
-   add_disk_randomness(rq-rq_disk);
+   if (blk_end_request(rq, (rq-errors == 0), blk_rq_bytes(rq)))
+   BUG();
+
spin_lock_irqsave(h-lock, flags);
-   end_that_request_last(rq, (rq-errors == 0));
cmd_free(h, cmd, 1);
cciss_check_queues(h);
spin_unlock_irqrestore(h-lock, flags);
@@ -2544,7 +2526,6 @@ after_error_processing:
}
cmd-rq-data_len = 0;
cmd-rq-completion_data = cmd;
-   blk_add_trace_rq(cmd-rq-q, cmd-rq, BLK_TA_COMPLETE);
blk_complete_request(cmd-rq);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 20/28] blk_end_request: changing ide-scsi (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts ide-scsi to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/scsi/ide-scsi.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/scsi/ide-scsi.c
===
--- 2.6.24-rc3-mm2.orig/drivers/scsi/ide-scsi.c
+++ 2.6.24-rc3-mm2/drivers/scsi/ide-scsi.c
@@ -918,8 +918,8 @@ static int idescsi_eh_reset (struct scsi
}
 
/* kill current request */
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   if (__blk_end_request(req, 0, 0))
+   BUG();
if (blk_sense_request(req))
kfree(scsi-pc-buffer);
kfree(scsi-pc);
@@ -928,8 +928,8 @@ static int idescsi_eh_reset (struct scsi
 
/* now nuke the drive queue */
while ((req = elv_next_request(drive-queue))) {
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   if (__blk_end_request(req, 0, 0))
+   BUG();
}
 
HWGROUP(drive)-rq = NULL;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 19/28] blk_end_request: changing scsi (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts scsi mid-layer to use blk_end_request().

The comment above scsi_next_command() is not related to this change.
It had originally been there before scsi_next_command() was included
in scsi_finalize_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/scsi/scsi_lib.c |   10 +++---
 1 files changed, 7 insertions(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
===
--- 2.6.24-rc3-mm2.orig/drivers/scsi/scsi_lib.c
+++ 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
@@ -683,7 +683,7 @@ static struct scsi_cmnd *scsi_end_reques
 * If there are blocks left over at the end, set up the command
 * to queue the remainder of them.
 */
-   if (end_that_request_chunk(req, uptodate, bytes)) {
+   if (blk_end_request(req, uptodate, bytes)) {
int leftover = (req-hard_nr_sectors  9);
 
if (blk_pc_request(req))
@@ -691,7 +691,7 @@ static struct scsi_cmnd *scsi_end_reques
 
/* kill remainder if no retrys */
if (!uptodate  blk_noretry_request(req))
-   end_that_request_chunk(req, 0, leftover);
+   blk_end_request(req, 0, leftover);
else {
if (requeue) {
/*
@@ -706,7 +706,11 @@ static struct scsi_cmnd *scsi_end_reques
}
}
 
-   scsi_finalize_request(cmd, uptodate);
+   /*
+* This will goose the queue request function at the end, so we don't
+* need to worry about launching another command.
+*/
+   scsi_next_command(cmd);
return NULL;
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 12/28] blk_end_request: changing ub (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts ub to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ub.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/ub.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/ub.c
+++ 2.6.24-rc3-mm2/drivers/block/ub.c
@@ -816,8 +816,8 @@ static void ub_end_rq(struct request *rq
uptodate = 0;
rq-errors = scsi_status;
}
-   end_that_request_first(rq, uptodate, rq-hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, uptodate, blk_rq_bytes(rq)))
+   BUG();
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 08/28] blk_end_request: changing nbd (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts nbd to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/nbd.c |4 +---
 1 files changed, 1 insertion(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/nbd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/nbd.c
+++ 2.6.24-rc3-mm2/drivers/block/nbd.c
@@ -108,9 +108,7 @@ static void nbd_end_request(struct reque
req, uptodate? done: failed);
 
spin_lock_irqsave(q-queue_lock, flags);
-   if (!end_that_request_first(req, uptodate, req-nr_sectors)) {
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, uptodate, req-nr_sectors  9);
spin_unlock_irqrestore(q-queue_lock, flags);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 05/28] blk_end_request: changing um (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts um to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 arch/um/drivers/ubd_kern.c |   10 +-
 1 files changed, 1 insertion(+), 9 deletions(-)

Index: 2.6.24-rc3-mm2/arch/um/drivers/ubd_kern.c
===
--- 2.6.24-rc3-mm2.orig/arch/um/drivers/ubd_kern.c
+++ 2.6.24-rc3-mm2/arch/um/drivers/ubd_kern.c
@@ -481,15 +481,7 @@ int thread_fd = -1;
 
 static void ubd_end_request(struct request *req, int bytes, int uptodate)
 {
-   if (!end_that_request_first(req, uptodate, bytes  9)) {
-   struct ubd *dev = req-rq_disk-private_data;
-   unsigned long flags;
-
-   add_disk_randomness(req-rq_disk);
-   spin_lock_irqsave(dev-lock, flags);
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(dev-lock, flags);
-   }
+   blk_end_request(req, uptodate, bytes);
 }
 
 /* Callable only from interrupt context - otherwise you need to do
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 23/28] blk_end_request: changing cpqarray (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts cpqarray to use blk_end_request().

cpqarray is a little bit different from normal drivers.
cpqarray directly calls bio_endio() and disk_stat_add()
when completing request.  But those can be replaced with
__end_that_request_first().
After the replacement, request completion procedures of
those drivers become like the following:
o end_that_request_first()
o add_disk_randomness()
o end_that_request_last()
This can be converted to blk_end_request() by following
the rule (b) mentioned in the patch subject
[PATCH 01/28] blk_end_request: add new request completion interface.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/cpqarray.c |   27 ++-
 1 files changed, 2 insertions(+), 25 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/cpqarray.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/cpqarray.c
+++ 2.6.24-rc3-mm2/drivers/block/cpqarray.c
@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_buffers(struct bio *bio, int ok);
 static inline void complete_command(cmdlist_t *cmd, int timeout);
 
 static irqreturn_t do_ida_intr(int irq, void *dev_id);
@@ -980,19 +979,6 @@ static void start_io(ctlr_info_t *h)
}
 }
 
-static inline void complete_buffers(struct bio *bio, int ok)
-{
-   struct bio *xbh;
-
-   while (bio) {
-   xbh = bio-bi_next;
-   bio-bi_next = NULL;
-   
-   bio_endio(bio, ok ? 0 : -EIO);
-
-   bio = xbh;
-   }
-}
 /*
  * Mark all buffers that cmd was responsible for
  */
@@ -1030,18 +1016,9 @@ static inline void complete_command(cmdl
 pci_unmap_page(hba[cmd-ctlr]-pci_dev, cmd-req.sg[i].addr,
cmd-req.sg[i].size, ddir);
 
-   complete_buffers(rq-bio, ok);
-
-   if (blk_fs_request(rq)) {
-   const int rw = rq_data_dir(rq);
-
-   disk_stat_add(rq-rq_disk, sectors[rw], rq-nr_sectors);
-   }
-
-   add_disk_randomness(rq-rq_disk);
-
DBGPX(printk(Done with %p\n, rq););
-   end_that_request_last(rq, ok ? 1 : -EIO);
+   if (__blk_end_request(rq, ok, blk_rq_bytes(rq)))
+   BUG();
 }
 
 /*
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 16/28] blk_end_request: changing i2o_block (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts i2o_block to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/message/i2o/i2o_block.c |8 ++--
 1 files changed, 2 insertions(+), 6 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/message/i2o/i2o_block.c
===
--- 2.6.24-rc3-mm2.orig/drivers/message/i2o/i2o_block.c
+++ 2.6.24-rc3-mm2/drivers/message/i2o/i2o_block.c
@@ -426,22 +426,18 @@ static void i2o_block_end_request(struct
struct request_queue *q = req-q;
unsigned long flags;
 
-   if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+   if (blk_end_request(req, uptodate, nr_bytes)) {
int leftover = (req-hard_nr_sectors  KERNEL_SECTOR_SHIFT);
 
if (blk_pc_request(req))
leftover = req-data_len;
 
if (end_io_error(uptodate))
-   end_that_request_chunk(req, 0, leftover);
+   blk_end_request(req, 0, leftover);
}
 
-   add_disk_randomness(req-rq_disk);
-
spin_lock_irqsave(q-queue_lock, flags);
 
-   end_that_request_last(req, uptodate);
-
if (likely(dev)) {
dev-open_queue_depth--;
list_del(ireq-queue);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 09/28] blk_end_request: changing ps3disk (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts ps3disk to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ps3disk.c |6 +-
 1 files changed, 1 insertion(+), 5 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/ps3disk.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/ps3disk.c
+++ 2.6.24-rc3-mm2/drivers/block/ps3disk.c
@@ -280,11 +280,7 @@ static irqreturn_t ps3disk_interrupt(int
}
 
spin_lock(priv-lock);
-   if (!end_that_request_first(req, uptodate, num_sectors)) {
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, uptodate, num_sectors  9);
priv-req = NULL;
ps3disk_do_request(dev, priv-queue);
spin_unlock(priv-lock);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 00/28] blk_end_request: full I/O completion handler (take 3)

2007-11-30 Thread Kiyoshi Ueda
  - http://marc.theaimsgroup.com/?l=linux-scsi&m=115520444515914&w=2
  - http://marc.theaimsgroup.com/?l=linux-kernelm=116656637425880w=2
However, Jens pointed out that redesigning rq-end_io() as a full
completion handler would be better:

On Thu, 21 Dec 2006 08:49:47 +0100, Jens Axboe [EMAIL PROTECTED] wrote:
 Ok, I see what you are getting at. The current -end_io() is called when
 the request has fully completed, you want notification for each chunk
 potentially completed.
 
 I think a better design here would be to use -end_io() as the full
 completion handler, similar to how bio-bi_end_io() works. A request
 originating from __make_request() would set something ala:
.
 instead of calling the functions manually. That would allow you to get
 notification right at the beginning and do what you need, without adding
 a special hook for this.

I thought his comment was reasonable.
So I modified the patches based on his suggestion.


WHAT IS CHANGED
===
The change is basically illustrated by the following pseudo code:

[Before]
  if (end_that_request_{first/chunk} succeeds) { -- completes bios
 do something driver specific
 end_that_request_last() -- calls end_io()
 the request is free from the driver
  } else {
 the request was incomplete, retry for leftover or ignoring
  }

[After]
  if (blk_end_request() succeeds) { -- calls end_io(), completes bios
 the request is free from the driver
  } else {
 the request was incomplete, retry for leftover or ignoring
  }


In detail, request completion procedures are changed like below.

[Before]
  o 2 steps completion using end_that_request_{first/chunk}
and end_that_request_last().
  o Device drivers have ownership of a request until they
call end_that_request_last().
  o rq->end_io() is called at the last stage of
    end_that_request_last(), because some block layer code needs
    specific request handling when completing it.

[After]
  o 1 step completion using blk_end_request().
(end_that_request_* are no longer used from device drivers.)
  o Device drivers give over ownership of a request
when calling blk_end_request().
If it returns 0, the request is completed.
If it returns 1, the request isn't completed and
the ownership is returned to the device driver again.
  o rq->end_io() is called at the top of blk_end_request() to
    allow hooking into all parts of request completion.
    Existing users of rq->end_io() must be changed to do
    all parts of request completion.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 28/28] blk_end_request: remove/unexport end_that_request_* (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch removes the following functions:
  o end_that_request_first()
  o end_that_request_chunk()
and stops exporting the functions below:
  o end_that_request_last()

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   61 -
 include/linux/blkdev.h |   15 
 2 files changed, 21 insertions(+), 55 deletions(-)

Index: 2.6.24-rc3-mm2/block/ll_rw_blk.c
===
--- 2.6.24-rc3-mm2.orig/block/ll_rw_blk.c
+++ 2.6.24-rc3-mm2/block/ll_rw_blk.c
@@ -3415,6 +3415,20 @@ static void blk_recalc_rq_sectors(struct
}
 }
 
+/**
+ * __end_that_request_first - end I/O on a request
+ * @req:  the request being processed
+ * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @req, and sets it up
+ * for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ * 0 - we are done with this request, call end_that_request_last()
+ * 1 - still buffers pending for this request
+ **/
 static int __end_that_request_first(struct request *req, int uptodate,
int nr_bytes)
 {
@@ -3531,49 +3545,6 @@ static int __end_that_request_first(stru
return 1;
 }
 
-/**
- * end_that_request_first - end I/O on a request
- * @req:  the request being processed
- * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- * Ends I/O on a number of sectors attached to @req, and sets it up
- * for the next range of segments (if any) in the cluster.
- *
- * Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
-{
-   return __end_that_request_first(req, uptodate, nr_sectors  9);
-}
-
-EXPORT_SYMBOL(end_that_request_first);
-
-/**
- * end_that_request_chunk - end I/O on a request
- * @req:  the request being processed
- * @uptodate: 1 for success, 0 for I/O error,  0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @req, and sets it up
- * for the next range of segments (if any). Like end_that_request_first(),
- * but deals with bytes instead of sectors.
- *
- * Return:
- * 0 - we are done with this request, call end_that_request_last()
- * 1 - still buffers pending for this request
- **/
-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
-{
-   return __end_that_request_first(req, uptodate, nr_bytes);
-}
-
-EXPORT_SYMBOL(end_that_request_chunk);
-
 /*
  * splice the completion data to a local structure and hand off to
  * process_completion_queue() to complete the requests
@@ -3653,7 +3624,7 @@ EXPORT_SYMBOL(blk_complete_request);
 /*
  * queue lock must be held
  */
-void end_that_request_last(struct request *req, int uptodate)
+static void end_that_request_last(struct request *req, int uptodate)
 {
struct gendisk *disk = req-rq_disk;
int error;
@@ -3688,8 +3659,6 @@ void end_that_request_last(struct reques
__blk_put_request(req-q, req);
 }
 
-EXPORT_SYMBOL(end_that_request_last);
-
 static inline void __end_request(struct request *rq, int uptodate,
 unsigned int nr_bytes)
 {
Index: 2.6.24-rc3-mm2/include/linux/blkdev.h
===
--- 2.6.24-rc3-mm2.orig/include/linux/blkdev.h
+++ 2.6.24-rc3-mm2/include/linux/blkdev.h
@@ -717,19 +717,16 @@ static inline void blk_run_address_space
 }
 
 /*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
  */
 extern int blk_end_request(struct request *rq, int uptodate, int nr_bytes);
 extern int __blk_end_request(struct request *rq, int uptodate, int nr_bytes);
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request

[PATCH 21/28] blk_end_request: changing xsysace (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts xsysace to use blk_end_request().

xsysace is a little bit different from normal drivers.
xsysace driver has a state machine in it.
It calls end_that_request_first() and end_that_request_last()
from different states. (ACE_FSM_STATE_REQ_TRANSFER and
ACE_FSM_STATE_REQ_COMPLETE, respectively.)

However, those states are consecutive and without any interruption
in between.
So we can just follow the standard conversion rule (b) mentioned in
the patch subject [PATCH 01/27] blk_end_request: add new request
completion interface.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/xsysace.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/xsysace.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/xsysace.c
+++ 2.6.24-rc3-mm2/drivers/block/xsysace.c
@@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_d
 
/* bio finished; is there another one? */
i = ace-req-current_nr_sectors;
-   if (end_that_request_first(ace-req, 1, i)) {
+   if (__blk_end_request(ace-req, 1, i)) {
/* dev_dbg(ace-dev, next block; h=%li c=%i\n,
 *  ace-req-hard_nr_sectors,
 *  ace-req-current_nr_sectors);
@@ -718,9 +718,6 @@ static void ace_fsm_dostate(struct ace_d
break;
 
case ACE_FSM_STATE_REQ_COMPLETE:
-   /* Complete the block request */
-   blkdev_dequeue_request(ace-req);
-   end_that_request_last(ace-req, 1);
ace-req = NULL;
 
/* Finished request; go to idle state */
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 15/28] blk_end_request: changing viocd (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts viocd to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/cdrom/viocd.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/cdrom/viocd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/cdrom/viocd.c
+++ 2.6.24-rc3-mm2/drivers/cdrom/viocd.c
@@ -302,11 +302,8 @@ static void viocd_end_request(struct req
if (!nsectors)
nsectors = 1;
 
-   if (end_that_request_first(req, uptodate, nsectors))
+   if (__blk_end_request(req, uptodate, nsectors  9))
BUG();
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 }
 
 static int rwreq;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 11/28] blk_end_request: changing sx8 (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts sx8 to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/sx8.c |4 +---
 1 files changed, 1 insertion(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/sx8.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/sx8.c
+++ 2.6.24-rc3-mm2/drivers/block/sx8.c
@@ -749,11 +749,9 @@ static inline void carm_end_request_queu
struct request *req = crq-rq;
int rc;
 
-   rc = end_that_request_first(req, uptodate, req-hard_nr_sectors);
+   rc = __blk_end_request(req, uptodate, blk_rq_bytes(req));
assert(rc == 0);
 
-   end_that_request_last(req, uptodate);
-
rc = carm_put_request(host, crq);
assert(rc == 0);
 }
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 18/28] blk_end_request: changing s390 (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts s390 to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/s390/block/dasd.c  |4 +---
 drivers/s390/char/tape_block.c |3 +--
 2 files changed, 2 insertions(+), 5 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/s390/block/dasd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/s390/block/dasd.c
+++ 2.6.24-rc3-mm2/drivers/s390/block/dasd.c
@@ -1080,10 +1080,8 @@ dasd_int_handler(struct ccw_device *cdev
 static inline void
 dasd_end_request(struct request *req, int uptodate)
 {
-   if (end_that_request_first(req, uptodate, req-hard_nr_sectors))
+   if (__blk_end_request(req, uptodate, blk_rq_bytes(req)))
BUG();
-   add_disk_randomness(req-rq_disk);
-   end_that_request_last(req, uptodate);
 }
 
 /*
Index: 2.6.24-rc3-mm2/drivers/s390/char/tape_block.c
===
--- 2.6.24-rc3-mm2.orig/drivers/s390/char/tape_block.c
+++ 2.6.24-rc3-mm2/drivers/s390/char/tape_block.c
@@ -76,9 +76,8 @@ tapeblock_trigger_requeue(struct tape_de
 static void
 tapeblock_end_request(struct request *req, int uptodate)
 {
-   if (end_that_request_first(req, uptodate, req-hard_nr_sectors))
+   if (__blk_end_request(req, uptodate, blk_rq_bytes(req)))
BUG();
-   end_that_request_last(req, uptodate);
 }
 
 static void
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 17/28] blk_end_request: changing mmc (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts mmc to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/mmc/card/block.c |   24 +---
 drivers/mmc/card/queue.c |4 ++--
 2 files changed, 7 insertions(+), 21 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/mmc/card/block.c
===
--- 2.6.24-rc3-mm2.orig/drivers/mmc/card/block.c
+++ 2.6.24-rc3-mm2/drivers/mmc/card/block.c
@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_q
 * A block was successfully transferred.
 */
spin_lock_irq(md-lock);
-   ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
-   if (!ret) {
-   /*
-* The whole request completed successfully.
-*/
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 1);
-   }
+   ret = __blk_end_request(req, 1, brq.data.bytes_xfered);
spin_unlock_irq(md-lock);
} while (ret);
 
@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_q
else
bytes = blocks  9;
spin_lock_irq(md-lock);
-   ret = end_that_request_chunk(req, 1, bytes);
+   ret = __blk_end_request(req, 1, bytes);
spin_unlock_irq(md-lock);
}
} else if (rq_data_dir(req) != READ 
   (card-host-caps  MMC_CAP_MULTIWRITE)) {
spin_lock_irq(md-lock);
-   ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+   ret = __blk_end_request(req, 1, brq.data.bytes_xfered);
spin_unlock_irq(md-lock);
}
 
mmc_release_host(card-host);
 
spin_lock_irq(md-lock);
-   while (ret) {
-   ret = end_that_request_chunk(req, 0,
-   req-current_nr_sectors  9);
-   }
-
-   add_disk_randomness(req-rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   while (ret)
+   ret = __blk_end_request(req, 0, blk_rq_cur_bytes(req));
spin_unlock_irq(md-lock);
 
return 0;
Index: 2.6.24-rc3-mm2/drivers/mmc/card/queue.c
===
--- 2.6.24-rc3-mm2.orig/drivers/mmc/card/queue.c
+++ 2.6.24-rc3-mm2/drivers/mmc/card/queue.c
@@ -94,8 +94,8 @@ static void mmc_request(struct request_q
printk(KERN_ERR MMC: killing requests for dead queue\n);
while ((req = elv_next_request(q)) != NULL) {
do {
-   ret = end_that_request_chunk(req, 0,
-   req-current_nr_sectors  9);
+   ret = __blk_end_request(req, 0,
+   blk_rq_cur_bytes(req));
} while (ret);
}
return;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 02/28] blk_end_request: add/export functions to get request size (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch adds/exports functions to get the size of request in bytes.
They are useful because blk_end_request() takes bytes
as a completed I/O size instead of sectors.

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   25 ++---
 include/linux/blkdev.h |8 
 2 files changed, 30 insertions(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/include/linux/blkdev.h
===
--- 2.6.24-rc3-mm2.orig/include/linux/blkdev.h
+++ 2.6.24-rc3-mm2/include/linux/blkdev.h
@@ -736,6 +736,14 @@ extern void end_dequeued_request(struct 
 extern void blk_complete_request(struct request *);
 
 /*
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
+ */
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
+
+/*
  * end_that_request_first/chunk() takes an uptodate argument. we account
  * any value = as an io error. 0 means -EIO for compatability reasons,
  * any other  0 value is the direct error type. An uptodate value of
Index: 2.6.24-rc3-mm2/block/ll_rw_blk.c
===
--- 2.6.24-rc3-mm2.orig/block/ll_rw_blk.c
+++ 2.6.24-rc3-mm2/block/ll_rw_blk.c
@@ -3701,13 +3701,32 @@ static inline void __end_request(struct 
}
 }
 
-static unsigned int rq_byte_size(struct request *rq)
+/**
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
 {
if (blk_fs_request(rq))
return rq-hard_nr_sectors  9;
 
return rq-data_len;
 }
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+   if (blk_fs_request(rq))
+   return rq-current_nr_sectors  9;
+
+   if (rq-bio)
+   return rq-bio-bi_size;
+
+   return rq-data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
  * end_queued_request - end all I/O on a queued request
@@ -3722,7 +3741,7 @@ static unsigned int rq_byte_size(struct 
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, rq_byte_size(rq), 1);
+   __end_request(rq, uptodate, blk_rq_bytes(rq), 1);
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3739,7 +3758,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-   __end_request(rq, uptodate, rq_byte_size(rq), 0);
+   __end_request(rq, uptodate, blk_rq_bytes(rq), 0);
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 04/28] blk_end_request: changing arm (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts arm to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---

 arch/arm/plat-omap/mailbox.c |9 ++---
 1 files changed, 6 insertions(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/arch/arm/plat-omap/mailbox.c
===
--- 2.6.24-rc3-mm2.orig/arch/arm/plat-omap/mailbox.c
+++ 2.6.24-rc3-mm2/arch/arm/plat-omap/mailbox.c
@@ -117,7 +117,8 @@ static void mbox_tx_work(struct work_str
 
spin_lock(q-queue_lock);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock(q-queue_lock);
}
 }
@@ -151,7 +152,8 @@ static void mbox_rx_work(struct work_str
 
spin_lock_irqsave(q-queue_lock, flags);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock_irqrestore(q-queue_lock, flags);
 
mbox-rxq-callback((void *)msg);
@@ -265,7 +267,8 @@ omap_mbox_read(struct device *dev, struc
 
spin_lock_irqsave(q-queue_lock, flags);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock_irqrestore(q-queue_lock, flags);
 
if (unlikely(mbox_seq_test(mbox, *p))) {
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 07/28] blk_end_request: changing floppy (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts floppy to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/floppy.c |8 +++-
 1 files changed, 3 insertions(+), 5 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/floppy.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/floppy.c
+++ 2.6.24-rc3-mm2/drivers/block/floppy.c
@@ -2290,18 +2290,16 @@ static int do_format(int drive, struct f
 static void floppy_end_request(struct request *req, int uptodate)
 {
unsigned int nr_sectors = current_count_sectors;
+   unsigned int drive = (unsigned int)req-rq_disk-private_data;
 
/* current_count_sectors can be zero if transfer failed */
if (!uptodate)
nr_sectors = req-current_nr_sectors;
-   if (end_that_request_first(req, uptodate, nr_sectors))
+   if (__blk_end_request(req, uptodate, nr_sectors  9))
return;
-   add_disk_randomness(req-rq_disk);
-   floppy_off((long)req-rq_disk-private_data);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 
/* We're done with the request */
+   floppy_off(drive);
current_req = NULL;
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 14/28] blk_end_request: changing xen-blkfront (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts xen-blkfront to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/xen-blkfront.c |5 ++---
 1 files changed, 2 insertions(+), 3 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/block/xen-blkfront.c
===
--- 2.6.24-rc3-mm2.orig/drivers/block/xen-blkfront.c
+++ 2.6.24-rc3-mm2/drivers/block/xen-blkfront.c
@@ -494,10 +494,9 @@ static irqreturn_t blkif_interrupt(int i
dev_dbg(info-xbdev-dev, Bad return from 
blkdev data 
request: %x\n, bret-status);
 
-   ret = end_that_request_first(req, uptodate,
-   req-hard_nr_sectors);
+   ret = __blk_end_request(req, uptodate,
+   blk_rq_bytes(req));
BUG_ON(ret);
-   end_that_request_last(req, uptodate);
break;
default:
BUG();
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 24/28] blk_end_request: changing ide normal caller (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts normal parts of ide to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/ide/ide-cd.c |6 +++---
 drivers/ide/ide-io.c |   17 ++---
 2 files changed, 9 insertions(+), 14 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/ide/ide-cd.c
===
--- 2.6.24-rc3-mm2.orig/drivers/ide/ide-cd.c
+++ 2.6.24-rc3-mm2/drivers/ide/ide-cd.c
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive
BUG();
} else {
spin_lock_irqsave(ide_lock, flags);
-   end_that_request_chunk(failed, 0,
-   failed-data_len);
-   end_that_request_last(failed, 0);
+   if (__blk_end_request(failed, 0,
+ failed-data_len))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
}
} else
Index: 2.6.24-rc3-mm2/drivers/ide/ide-io.c
===
--- 2.6.24-rc3-mm2.orig/drivers/ide/ide-io.c
+++ 2.6.24-rc3-mm2/drivers/ide/ide-io.c
@@ -78,14 +78,9 @@ static int __ide_end_request(ide_drive_t
ide_dma_on(drive);
}
 
-   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-   add_disk_randomness(rq-rq_disk);
-   if (dequeue) {
-   if (!list_empty(rq-queuelist))
-   blkdev_dequeue_request(rq);
+   if (!__blk_end_request(rq, uptodate, nr_bytes)) {
+   if (dequeue)
HWGROUP(drive)-rq = NULL;
-   }
-   end_that_request_last(rq, uptodate);
ret = 0;
}
 
@@ -290,9 +285,9 @@ static void ide_complete_pm_request (ide
drive-blocked = 0;
blk_start_queue(drive-queue);
}
-   blkdev_dequeue_request(rq);
HWGROUP(drive)-rq = NULL;
-   end_that_request_last(rq, 1);
+   if (__blk_end_request(rq, 1, 0))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
 }
 
@@ -402,10 +397,10 @@ void ide_end_drive_cmd (ide_drive_t *dri
}
 
spin_lock_irqsave(ide_lock, flags);
-   blkdev_dequeue_request(rq);
HWGROUP(drive)-rq = NULL;
rq-errors = err;
-   end_that_request_last(rq, !rq-errors);
+   if (__blk_end_request(rq, !rq-errors, 0))
+   BUG();
spin_unlock_irqrestore(ide_lock, flags);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 27/28] blk_end_request: changing scsi mid-layer for bidi (take 3)

2007-11-30 Thread Kiyoshi Ueda
This patch converts bidi of scsi mid-layer to use blk_end_request().

rq-next_rq represents a pair of bidi requests.
(There is no other use of 'next_rq' of struct request.)
For both requests in the pair, end_that_request_chunk() should be
called before end_that_request_last() is called for one of them.
Since the calls to end_that_request_first()/chunk() and
end_that_request_last() are packaged into blk_end_request(),
the handling of next_rq completion has to be moved into
blk_end_request(), too.

Bidi sets its specific value to rq-data_len before the request is
completed so that upper-layer can read it.
This setting must be between end_that_request_chunk() and
end_that_request_last(), because rq-data_len may be used
in end_that_request_chunk() by blk_trace and so on.
To satisfy the requirement, use blk_end_request_callback() which
is added in PATCH 25 only for the tricky drivers.

If bidi didn't reuse rq-data_len and added new members to request
for the specific value, it could set before end_that_request_chunk()
and use the standard blk_end_request() like below.

void scsi_end_bidi_request(struct scsi_cmnd *cmd)
{
struct request *req = cmd-request;

rq-resid = scsi_out(cmd)-resid;
rq-next_rq-resid = scsi_in(cmd)-resid;

if (blk_end_request(req, 1, req-data_len))
BUG();

scsi_release_buffers(cmd);
scsi_next_command(cmd);
}

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c   |   18 +
 drivers/scsi/scsi_lib.c |   66 
 2 files changed, 52 insertions(+), 32 deletions(-)

Index: 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
===
--- 2.6.24-rc3-mm2.orig/drivers/scsi/scsi_lib.c
+++ 2.6.24-rc3-mm2/drivers/scsi/scsi_lib.c
@@ -629,28 +629,6 @@ void scsi_run_host_queues(struct Scsi_Ho
scsi_run_queue(sdev-request_queue);
 }
 
-static void scsi_finalize_request(struct scsi_cmnd *cmd, int uptodate)
-{
-   struct request_queue *q = cmd-device-request_queue;
-   struct request *req = cmd-request;
-   unsigned long flags;
-
-   add_disk_randomness(req-rq_disk);
-
-   spin_lock_irqsave(q-queue_lock, flags);
-   if (blk_rq_tagged(req))
-   blk_queue_end_tag(q, req);
-
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(q-queue_lock, flags);
-
-   /*
-* This will goose the queue request function at the end, so we don't
-* need to worry about launching another command.
-*/
-   scsi_next_command(cmd);
-}
-
 /*
  * Function:scsi_end_request()
  *
@@ -921,6 +899,20 @@ void scsi_release_buffers(struct scsi_cm
 EXPORT_SYMBOL(scsi_release_buffers);
 
 /*
+ * Called from blk_end_request_callback() after all DATA in rq and its next_rq
+ * are completed before rq is completed/freed.
+ */
+static int scsi_end_bidi_request_cb(struct request *rq)
+{
+   struct scsi_cmnd *cmd = rq-special;
+
+   rq-data_len = scsi_out(cmd)-resid;
+   rq-next_rq-data_len = scsi_in(cmd)-resid;
+
+   return 0;
+}
+
+/*
  * Bidi commands Must be complete as a whole, both sides at once.
  * If part of the bytes were written and lld returned
  * scsi_in()-resid and/or scsi_out()-resid this information will be left
@@ -931,22 +923,32 @@ void scsi_end_bidi_request(struct scsi_c
 {
struct request *req = cmd-request;
 
-   end_that_request_chunk(req, 1, req-data_len);
-   req-data_len = scsi_out(cmd)-resid;
-
-   end_that_request_chunk(req-next_rq, 1, req-next_rq-data_len);
-   req-next_rq-data_len = scsi_in(cmd)-resid;
-
-   scsi_release_buffers(cmd);
-
/*
 *FIXME: If ll_rw_blk.c is changed to also put_request(req-next_rq)
-*   in end_that_request_last() then this WARN_ON must be removed.
+*   in blk_end_request() then this WARN_ON must be removed.
 *   for now, upper-driver must have registered an end_io.
 */
WARN_ON(!req-end_io);
 
-   scsi_finalize_request(cmd, 1);
+   /*
+* blk_end_request() family take care of data completion of next_rq.
+*
+* req-data_len and req-next_rq-data_len must be set after
+* all data are completed, since they may be referenced during
+* the data completion process.
+* So use the callback feature of blk_end_request() here.
+*
+* NOTE: If bidi doesn't reuse the data_len field for upper-layer's
+*   reference (e.g. adds new members for it to struct request),
+*   we can use the standard blk_end_request() interface here.
+*/
+   if (blk_end_request_callback(req, 1, req-data_len,
+scsi_end_bidi_request_cb))
+   /* req has not been completed */
+   BUG();
+
+   scsi_release_buffers(cmd

Re: [PATCH][RFC] Use bio markers for request callback

2007-09-11 Thread Kiyoshi Ueda
Hi Hannes,

On Tue, 11 Sep 2007 12:44:46 +0200, Hannes Reinecke [EMAIL PROTECTED] wrote:
 this is a proposal for a different implementation of request  
 callbacks. The existing -endio callback of a request is actually a  
 destructor function, to be called to terminate a request and free all  
 structures.
 
 However, on certain occasions (like request-based multipathing) it is  
 desirable to have a callback function for a request which is called  
 right after the request is finished, ie in end_that_request_first()  
 before any bio-bi_endio callback is called.
 
 So a simple solution for this is to clone the request and add a new  
 'marker' bio in front of the bio list of the request. This callback  
 will be attached a structure in bi_private which keeps a pointer to  
 the cloned and the original request, thus serving as a callback for  
 the request itself.

Thank you for another idea for request-based multipath.

However, I disagree with the idea.
I think the design (bio->bi_end_io() completes a request) is a little
complex and breaks layer stacking.
Also, I think that the design of completion handling by 2 hooks,
bio->bi_end_io() and rq->end_io(), makes error handling in dm-multipath
complex.
e.g. Even if we detect an error on the first bio-bi_end_io() hook,
 the request can't be retried until the second rq-end_io() hook
 is called, because the ownership of the request is still on
 low level drivers until the low level driver calls
 end_that_request_last().


Although this is just implementation issue, I think that the patch
can't handle leftover.
SCSI, scsi_end_request(), may have some leftovers in the request.
Then, the marker bio is freed in bio-bi_end_io() and the request
is resubmitted in SCSI level.  So the resubmitted request
doesn't have the marker bio any more.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 00/27] blk_end_request: full I/O completion handler (take 2)

2007-09-11 Thread Kiyoshi Ueda
 or ignoring
  }

[After]
  if (blk_end_request() succeeds) { -- calls end_io(), completes bios
 the request is free from the driver
  } else {
 the request was incomplete, retry for leftover or ignoring
  }


In detail, request completion procedures are changed like below.

[Before]
  o 2 steps completion using end_that_request_{first/chunk}
and end_that_request_last().
  o Device drivers have ownership of a request until they
call end_that_request_last().
  o rq->end_io() is called at the last stage of
    end_that_request_last(), because some block layer code needs
    specific request handling when completing it.

[After]
  o 1 step completion using blk_end_request().
(end_that_request_* are no longer used from device drivers.)
  o Device drivers give over ownership of a request
when calling blk_end_request().
If it returns 0, the request is completed.
If it returns 1, the request isn't completed and
the ownership is returned to the device driver again.
  o rq->end_io() is called at the top of blk_end_request() to
    allow hooking into all parts of request completion.
    Existing users of rq->end_io() must be changed to do
    all parts of request completion.

Thanks,
Kiyoshi Ueda
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 03/27] blk_end_request: changing block layer core (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts core parts of block layer to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c |   15 +--
 1 files changed, 5 insertions(+), 10 deletions(-)

diff -rupN 02-rq-size-macro/block/ll_rw_blk.c 
03-blkcore-caller-change/block/ll_rw_blk.c
--- 02-rq-size-macro/block/ll_rw_blk.c  2007-09-10 17:42:56.0 -0400
+++ 03-blkcore-caller-change/block/ll_rw_blk.c  2007-09-10 17:57:59.0 
-0400
@@ -365,8 +365,8 @@ void blk_ordered_complete_seq(struct req
q->ordseq = 0;
rq = q->orig_bar_rq;
 
-   end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, uptodate, blk_rq_size(rq)))
+   BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -484,9 +484,8 @@ int blk_do_ordered(struct request_queue 
 * ORDERED_NONE while this request is on it.
 */
blkdev_dequeue_request(rq);
-   end_that_request_first(rq, -EOPNOTSUPP,
-  rq->hard_nr_sectors);
-   end_that_request_last(rq, -EOPNOTSUPP);
+   if (__blk_end_request(rq, -EOPNOTSUPP, blk_rq_size(rq)))
+   BUG();
*rqp = NULL;
return 0;
}
@@ -3720,11 +3719,7 @@ EXPORT_SYMBOL(end_that_request_last);
 static inline void __end_request(struct request *rq, int uptodate,
 unsigned int nr_bytes)
 {
-   if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-   blkdev_dequeue_request(rq);
-   add_disk_randomness(rq-rq_disk);
-   end_that_request_last(rq, uptodate);
-   }
+   __blk_end_request(rq, uptodate, nr_bytes);
 }
 
 /**
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 01/27] blk_end_request: add new request completion interface (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch adds 2 new interfaces for request completion:
  o blk_end_request()   : called without queue lock
  o __blk_end_request() : called with queue lock held

Some device drivers call some generic functions below between
end_that_request_{first/chunk} and end_that_request_last().
  o add_disk_randomness()
  o blk_queue_end_tag()
  o blkdev_dequeue_request()
These are called in the blk_end_request() as a part of generic
request completion.
So all device drivers end up calling the above functions.

Normal drivers can be converted to use blk_end_request()
in a standard way shown below.

 a) end_that_request_{chunk/first}
spin_lock_irqsave()
(add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
end_that_request_last()
spin_unlock_irqrestore()
= blk_end_request()

 b) spin_lock_irqsave()
end_that_request_{chunk/first}
(add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
end_that_request_last()
spin_unlock_irqrestore()
=> spin_lock_irqsave()
   __blk_end_request()
   spin_unlock_irqrestore()

 c) end_that_request_last()
= __blk_end_request()

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 block/ll_rw_blk.c  |   74 
 include/linux/blkdev.h |2 +
 2 files changed, 76 insertions(+)

diff -rupN 2.6.23-rc4-mm1/block/ll_rw_blk.c 
01-blkendreq-interface/block/ll_rw_blk.c
--- 2.6.23-rc4-mm1/block/ll_rw_blk.c2007-09-10 17:32:11.0 -0400
+++ 01-blkendreq-interface/block/ll_rw_blk.c2007-09-10 17:42:56.0 
-0400
@@ -3776,6 +3776,80 @@ void end_request(struct request *req, in
 }
 EXPORT_SYMBOL(end_request);
 
+static void complete_request(struct request *rq, int uptodate)
+{
+   if (blk_rq_tagged(rq))
+   blk_queue_end_tag(rq->q, rq);
+
+   if (!list_empty(&rq->queuelist))
+   blkdev_dequeue_request(rq);
+
+   end_that_request_last(rq, uptodate);
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:   the request being processed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ * Ends I/O on a number of bytes attached to @rq.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ * 0 - we are done with this request
+ * 1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int uptodate, int nr_bytes)
+{
+   struct request_queue *q = rq->q;
+   unsigned long flags = 0UL;
+
+   if (blk_fs_request(rq) || blk_pc_request(rq)) {
+   if (__end_that_request_first(rq, uptodate, nr_bytes))
+   return 1;
+   }
+
+   /*
+* No need to check the argument here because it is done
+* in add_disk_randomness().
+*/
+   add_disk_randomness(rq->rq_disk);
+
+   spin_lock_irqsave(q->queue_lock, flags);
+   complete_request(rq, uptodate);
+   spin_unlock_irqrestore(q->queue_lock, flags);
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ *
+ * Description:
+ * Must be called with queue lock held unlike blk_end_request().
+ **/
+int __blk_end_request(struct request *rq, int uptodate, int nr_bytes)
+{
+   if (blk_fs_request(rq) || blk_pc_request(rq)) {
+   if (__end_that_request_first(rq, uptodate, nr_bytes))
+   return 1;
+   }
+
+   /*
+* No need to check the argument here because it is done
+* in add_disk_randomness().
+*/
+   add_disk_randomness(rq->rq_disk);
+
+   complete_request(rq, uptodate);
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 struct bio *bio)
 {
diff -rupN 2.6.23-rc4-mm1/include/linux/blkdev.h 
01-blkendreq-interface/include/linux/blkdev.h
--- 2.6.23-rc4-mm1/include/linux/blkdev.h   2007-09-10 17:32:18.0 
-0400
+++ 01-blkendreq-interface/include/linux/blkdev.h   2007-09-10 
17:42:56.0 -0400
@@ -727,6 +727,8 @@ static inline void blk_run_address_space
  * for parts of the original function. This prevents
  * code duplication in drivers.
  */
+extern int blk_end_request(struct request *rq, int uptodate, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int uptodate, int nr_bytes);
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 04/27] blk_end_request: changing arm (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts arm to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---

 arch/arm/plat-omap/mailbox.c |9 ++---
 1 files changed, 6 insertions(+), 3 deletions(-)

diff -rupN 03-blkcore-caller-change/arch/arm/plat-omap/mailbox.c 
04-arm-caller-change/arch/arm/plat-omap/mailbox.c
--- 03-blkcore-caller-change/arch/arm/plat-omap/mailbox.c   2007-08-27 
21:32:35.0 -0400
+++ 04-arm-caller-change/arch/arm/plat-omap/mailbox.c   2007-09-10 
17:59:02.0 -0400
@@ -117,7 +117,8 @@ static void mbox_tx_work(struct work_str
 
spin_lock(q->queue_lock);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock(q->queue_lock);
}
 }
@@ -151,7 +152,8 @@ static void mbox_rx_work(struct work_str
 
spin_lock_irqsave(q->queue_lock, flags);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock_irqrestore(q->queue_lock, flags);
 
mbox->rxq->callback((void *)msg);
@@ -265,7 +267,8 @@ omap_mbox_read(struct device *dev, struc
 
spin_lock_irqsave(q->queue_lock, flags);
blkdev_dequeue_request(rq);
-   end_that_request_last(rq, 0);
+   if (__blk_end_request(rq, 0, 0))
+   BUG();
spin_unlock_irqrestore(q->queue_lock, flags);
 
if (unlikely(mbox_seq_test(mbox, *p))) {
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 05/27] blk_end_request: changing um (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts um to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 arch/um/drivers/ubd_kern.c |   10 +-
 1 files changed, 1 insertion(+), 9 deletions(-)

diff -rupN 04-arm-caller-change/arch/um/drivers/ubd_kern.c 
05-um-caller-change/arch/um/drivers/ubd_kern.c
--- 04-arm-caller-change/arch/um/drivers/ubd_kern.c 2007-08-27 
21:32:35.0 -0400
+++ 05-um-caller-change/arch/um/drivers/ubd_kern.c  2007-09-10 
17:59:35.0 -0400
@@ -476,15 +476,7 @@ int thread_fd = -1;
 
 static void ubd_end_request(struct request *req, int bytes, int uptodate)
 {
-   if (!end_that_request_first(req, uptodate, bytes >> 9)) {
-   struct ubd *dev = req->rq_disk->private_data;
-   unsigned long flags;
-
-   add_disk_randomness(req->rq_disk);
-   spin_lock_irqsave(&dev->lock, flags);
-   end_that_request_last(req, uptodate);
-   spin_unlock_irqrestore(&dev->lock, flags);
-   }
+   blk_end_request(req, uptodate, bytes);
 }
 
 /* Callable only from interrupt context - otherwise you need to do
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 06/27] blk_end_request: changing DAC960 (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts DAC960 to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/DAC960.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

diff -rupN 05-um-caller-change/drivers/block/DAC960.c 
06-dac960-caller-change/drivers/block/DAC960.c
--- 05-um-caller-change/drivers/block/DAC960.c  2007-09-10 17:32:11.0 
-0400
+++ 06-dac960-caller-change/drivers/block/DAC960.c  2007-09-10 
18:00:55.0 -0400
@@ -3460,10 +3460,7 @@ static inline bool DAC960_ProcessComplet
pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
Command->SegmentCount, Command->DmaDirection);
 
-if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
-   add_disk_randomness(Request->rq_disk);
-   end_that_request_last(Request, UpToDate);
-
+if (!__blk_end_request(Request, UpToDate, Command->BlockCount << 9)) {
if (Command->Completion) {
complete(Command->Completion);
Command->Completion = NULL;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 07/27] blk_end_request: changing floppy (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts floppy to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/floppy.c |8 +++-
 1 files changed, 3 insertions(+), 5 deletions(-)

diff -rupN 06-dac960-caller-change/drivers/block/floppy.c 
07-floppy-caller-change/drivers/block/floppy.c
--- 06-dac960-caller-change/drivers/block/floppy.c  2007-09-10 
17:32:11.0 -0400
+++ 07-floppy-caller-change/drivers/block/floppy.c  2007-09-10 
18:01:23.0 -0400
@@ -2290,18 +2290,16 @@ static int do_format(int drive, struct f
 static void floppy_end_request(struct request *req, int uptodate)
 {
unsigned int nr_sectors = current_count_sectors;
+   unsigned int drive = (unsigned int)req->rq_disk->private_data;
 
/* current_count_sectors can be zero if transfer failed */
if (!uptodate)
nr_sectors = req->current_nr_sectors;
-   if (end_that_request_first(req, uptodate, nr_sectors))
+   if (__blk_end_request(req, uptodate, nr_sectors << 9))
return;
-   add_disk_randomness(req->rq_disk);
-   floppy_off((long)req->rq_disk->private_data);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 
/* We're done with the request */
+   floppy_off(drive);
current_req = NULL;
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 08/27] blk_end_request: changing lguest (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts lguest to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/lguest_blk.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

diff -rupN 07-floppy-caller-change/drivers/block/lguest_blk.c 
08-lguest-caller-change/drivers/block/lguest_blk.c
--- 07-floppy-caller-change/drivers/block/lguest_blk.c  2007-08-27 
21:32:35.0 -0400
+++ 08-lguest-caller-change/drivers/block/lguest_blk.c  2007-09-10 
18:01:53.0 -0400
@@ -77,11 +77,8 @@ struct blockdev
  * request.  This improved disk speed by 130%. */
 static void end_entire_request(struct request *req, int uptodate)
 {
-   if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+   if (__blk_end_request(req, uptodate, blk_rq_size(req)))
BUG();
-   add_disk_randomness(req->rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 }
 
 /* I'm told there are only two stories in the world worth telling: love and
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 09/27] blk_end_request: changing nbd (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts nbd to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/nbd.c |4 +---
 1 files changed, 1 insertion(+), 3 deletions(-)

diff -rupN 08-lguest-caller-change/drivers/block/nbd.c 
09-nbd-caller-change/drivers/block/nbd.c
--- 08-lguest-caller-change/drivers/block/nbd.c 2007-09-10 17:32:11.0 
-0400
+++ 09-nbd-caller-change/drivers/block/nbd.c2007-09-10 18:02:30.0 
-0400
@@ -107,9 +107,7 @@ static void nbd_end_request(struct reque
req, uptodate? "done": "failed");
 
spin_lock_irqsave(q->queue_lock, flags);
-   if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, uptodate, req->nr_sectors << 9);
spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 10/27] blk_end_request: changing ps3disk (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts ps3disk to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ps3disk.c |6 +-
 1 files changed, 1 insertion(+), 5 deletions(-)

diff -rupN 09-nbd-caller-change/drivers/block/ps3disk.c 
10-ps3disk-caller-change/drivers/block/ps3disk.c
--- 09-nbd-caller-change/drivers/block/ps3disk.c2007-08-27 
21:32:35.0 -0400
+++ 10-ps3disk-caller-change/drivers/block/ps3disk.c2007-09-10 
18:03:00.0 -0400
@@ -280,11 +280,7 @@ static irqreturn_t ps3disk_interrupt(int
}
 
spin_lock(&priv->lock);
-   if (!end_that_request_first(req, uptodate, num_sectors)) {
-   add_disk_randomness(req->rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
-   }
+   __blk_end_request(req, uptodate, num_sectors << 9);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 11/27] blk_end_request: changing sunvdc (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts sunvdc to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/sunvdc.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

diff -rupN 10-ps3disk-caller-change/drivers/block/sunvdc.c 
11-sunvdc-caller-change/drivers/block/sunvdc.c
--- 10-ps3disk-caller-change/drivers/block/sunvdc.c 2007-08-27 
21:32:35.0 -0400
+++ 11-sunvdc-caller-change/drivers/block/sunvdc.c  2007-09-10 
18:03:31.0 -0400
@@ -213,10 +213,7 @@ static void vdc_end_special(struct vdc_p
 
 static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
 {
-   if (end_that_request_first(req, uptodate, num_sectors))
-   return;
-   add_disk_randomness(req->rq_disk);
-   end_that_request_last(req, uptodate);
+   __blk_end_request(req, uptodate, num_sectors << 9);
 }
 
 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 12/27] blk_end_request: changing sx8 (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts sx8 to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/sx8.c |4 +---
 1 files changed, 1 insertion(+), 3 deletions(-)

diff -rupN 11-sunvdc-caller-change/drivers/block/sx8.c 
12-sx8-caller-change/drivers/block/sx8.c
--- 11-sunvdc-caller-change/drivers/block/sx8.c 2007-08-27 21:32:35.0 
-0400
+++ 12-sx8-caller-change/drivers/block/sx8.c2007-09-10 18:04:02.0 
-0400
@@ -747,11 +747,9 @@ static inline void carm_end_request_queu
struct request *req = crq->rq;
int rc;
 
-   rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
+   rc = __blk_end_request(req, uptodate, blk_rq_size(req));
assert(rc == 0);
 
-   end_that_request_last(req, uptodate);
-
rc = carm_put_request(host, crq);
assert(rc == 0);
 }
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 13/27] blk_end_request: changing ub (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts ub to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/ub.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff -rupN 12-sx8-caller-change/drivers/block/ub.c 
13-ub-caller-change/drivers/block/ub.c
--- 12-sx8-caller-change/drivers/block/ub.c 2007-08-27 21:32:35.0 
-0400
+++ 13-ub-caller-change/drivers/block/ub.c  2007-09-10 18:04:31.0 
-0400
@@ -814,8 +814,8 @@ static void ub_end_rq(struct request *rq
uptodate = 0;
rq->errors = scsi_status;
}
-   end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-   end_that_request_last(rq, uptodate);
+   if (__blk_end_request(rq, uptodate, blk_rq_size(rq)))
+   BUG();
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 14/27] blk_end_request: changing viodasd (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts viodasd to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/block/viodasd.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

diff -rupN 13-ub-caller-change/drivers/block/viodasd.c 
14-viodasd-caller-change/drivers/block/viodasd.c
--- 13-ub-caller-change/drivers/block/viodasd.c 2007-08-27 21:32:35.0 
-0400
+++ 14-viodasd-caller-change/drivers/block/viodasd.c2007-09-10 
18:05:02.0 -0400
@@ -275,10 +275,7 @@ static struct block_device_operations vi
 static void viodasd_end_request(struct request *req, int uptodate,
int num_sectors)
 {
-   if (end_that_request_first(req, uptodate, num_sectors))
-   return;
-   add_disk_randomness(req->rq_disk);
-   end_that_request_last(req, uptodate);
+   __blk_end_request(req, uptodate, num_sectors << 9);
 }
 
 /*
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 16/27] blk_end_request: changing viocd (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts viocd to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/cdrom/viocd.c |5 +
 1 files changed, 1 insertion(+), 4 deletions(-)

diff -rupN 15-xen-caller-change/drivers/cdrom/viocd.c 
16-viocd-caller-change/drivers/cdrom/viocd.c
--- 15-xen-caller-change/drivers/cdrom/viocd.c  2007-08-27 21:32:35.0 
-0400
+++ 16-viocd-caller-change/drivers/cdrom/viocd.c2007-09-10 
18:06:02.0 -0400
@@ -389,11 +389,8 @@ static void viocd_end_request(struct req
if (!nsectors)
nsectors = 1;
 
-   if (end_that_request_first(req, uptodate, nsectors))
+   if (__blk_end_request(req, uptodate, nsectors << 9))
BUG();
-   add_disk_randomness(req->rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, uptodate);
 }
 
 static int rwreq;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 17/27] blk_end_request: changing i2o_block (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts i2o_block to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/message/i2o/i2o_block.c |8 ++--
 1 files changed, 2 insertions(+), 6 deletions(-)

diff -rupN 16-viocd-caller-change/drivers/message/i2o/i2o_block.c 
17-i2o-caller-change/drivers/message/i2o/i2o_block.c
--- 16-viocd-caller-change/drivers/message/i2o/i2o_block.c  2007-09-10 
17:32:13.0 -0400
+++ 17-i2o-caller-change/drivers/message/i2o/i2o_block.c2007-09-10 
18:08:24.0 -0400
@@ -425,22 +425,18 @@ static void i2o_block_end_request(struct
struct request_queue *q = req->q;
unsigned long flags;
 
-   if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+   if (blk_end_request(req, uptodate, nr_bytes)) {
int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
 
if (blk_pc_request(req))
leftover = req->data_len;
 
if (end_io_error(uptodate))
-   end_that_request_chunk(req, 0, leftover);
+   blk_end_request(req, 0, leftover);
}
 
-   add_disk_randomness(req->rq_disk);
-
spin_lock_irqsave(q->queue_lock, flags);
 
-   end_that_request_last(req, uptodate);
-
if (likely(dev)) {
dev->open_queue_depth--;
list_del(&ireq->queue);
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 18/27] blk_end_request: changing mmc (take 2)

2007-09-11 Thread Kiyoshi Ueda
This patch converts mmc to use blk_end_request().

Signed-off-by: Kiyoshi Ueda [EMAIL PROTECTED]
Signed-off-by: Jun'ichi Nomura [EMAIL PROTECTED]
---
 drivers/mmc/card/block.c |   24 +---
 drivers/mmc/card/queue.c |4 ++--
 2 files changed, 7 insertions(+), 21 deletions(-)

diff -rupN 17-i2o-caller-change/drivers/mmc/card/block.c 
18-mmc-caller-change/drivers/mmc/card/block.c
--- 17-i2o-caller-change/drivers/mmc/card/block.c   2007-09-10 
17:32:13.0 -0400
+++ 18-mmc-caller-change/drivers/mmc/card/block.c   2007-09-10 
18:08:53.0 -0400
@@ -336,15 +336,7 @@ static int mmc_blk_issue_rq(struct mmc_q
 * A block was successfully transferred.
 */
spin_lock_irq(&md->lock);
-   ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
-   if (!ret) {
-   /*
-* The whole request completed successfully.
-*/
-   add_disk_randomness(req->rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 1);
-   }
+   ret = __blk_end_request(req, 1, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
} while (ret);
 
@@ -374,27 +366,21 @@ static int mmc_blk_issue_rq(struct mmc_q
else
bytes = blocks << 9;
spin_lock_irq(&md->lock);
-   ret = end_that_request_chunk(req, 1, bytes);
+   ret = __blk_end_request(req, 1, bytes);
spin_unlock_irq(&md->lock);
}
} else if (rq_data_dir(req) != READ &&
   (card->host->caps & MMC_CAP_MULTIWRITE)) {
spin_lock_irq(&md->lock);
-   ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+   ret = __blk_end_request(req, 1, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
 
mmc_release_host(card->host);
 
spin_lock_irq(&md->lock);
-   while (ret) {
-   ret = end_that_request_chunk(req, 0,
-   req->current_nr_sectors << 9);
-   }
-
-   add_disk_randomness(req->rq_disk);
-   blkdev_dequeue_request(req);
-   end_that_request_last(req, 0);
+   while (ret)
+   ret = __blk_end_request(req, 0, blk_rq_cur_size(req));
spin_unlock_irq(&md->lock);
 
return 0;
diff -rupN 17-i2o-caller-change/drivers/mmc/card/queue.c 
18-mmc-caller-change/drivers/mmc/card/queue.c
--- 17-i2o-caller-change/drivers/mmc/card/queue.c   2007-08-27 
21:32:35.0 -0400
+++ 18-mmc-caller-change/drivers/mmc/card/queue.c   2007-09-10 
18:08:53.0 -0400
@@ -93,8 +93,8 @@ static void mmc_request(struct request_q
printk(KERN_ERR "MMC: killing requests for dead queue\n");
while ((req = elv_next_request(q)) != NULL) {
do {
-   ret = end_that_request_chunk(req, 0,
-   req->current_nr_sectors << 9);
+   ret = __blk_end_request(req, 0,
+   blk_rq_cur_size(req));
} while (ret);
}
return;
-
To unsubscribe from this list: send the line unsubscribe linux-scsi in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


  1   2   >