The previous patch makes plugging work for the multiple-queue case.
However, it only works for a single disk, because it assumes there is
at most one request in the plug list. If a task is accessing multiple
disks, e.g. through MD/DM, that assumption is wrong. Instead, let
blk_attempt_plug_merge() record a request from the same queue.

Cc: Jens Axboe <ax...@fb.com>
Cc: Christoph Hellwig <h...@lst.de>
Signed-off-by: Shaohua Li <s...@fb.com>
---
 block/blk-core.c | 10 +++++++---
 block/blk-mq.c   | 11 ++++++-----
 block/blk.h      |  3 ++-
 3 files changed, 15 insertions(+), 9 deletions(-)
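
[Not for the changelog] A minimal userspace sketch of the walk that
blk_attempt_plug_merge() now performs, in case it helps review. This is
illustration only: the struct and function names below are reduced
stand-ins, not the kernel's. The reverse walk visits the plug list
newest-to-oldest, counts requests belonging to the caller's queue, and
records one of them in *same_queue_rq, so the blk-mq caller no longer
has to assume the list head belongs to the same queue.

#include <stdio.h>

struct request_queue { int id; };	/* stand-in for the real struct */
struct request {
	struct request_queue *q;
	struct request *prev;		/* plug list modeled newest->oldest */
};

/* Mirrors the list_for_each_entry_reverse() walk in the patch. */
static void plug_walk(struct request *newest, struct request_queue *q,
		      unsigned int *request_count,
		      struct request **same_queue_rq)
{
	struct request *rq;

	for (rq = newest; rq; rq = rq->prev) {
		if (rq->q == q) {
			(*request_count)++;
			*same_queue_rq = rq;	/* overwritten per match, so
						   the oldest same-queue
						   request wins */
		}
		/* the real code additionally attempts bio merges here */
	}
}

int main(void)
{
	struct request_queue qa = { 1 }, qb = { 2 };
	/* plug list across two disks: a1 (oldest) <- b1 <- a2 (newest) */
	struct request a1 = { &qa, NULL };
	struct request b1 = { &qb, &a1 };
	struct request a2 = { &qa, &b1 };
	unsigned int count = 0;
	struct request *same = NULL;

	plug_walk(&a2, &qa, &count, &same);
	/* count == 2; same == &a1, a request on qa, even though the
	 * plug list also holds b1 for another disk */
	printf("count=%u same-queue rq found=%s\n",
	       count, same == &a1 ? "yes" : "no");
	return 0;
}

With a single disk the recorded request coincides with what the old
list_first_entry() assumption would have picked; with MD/DM on top of
several queues it no longer does, which is what the blk-mq hunk below
relies on.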

diff --git a/block/blk-core.c b/block/blk-core.c
index d51ed61..a5e1574 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1521,7 +1521,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count)
+                           unsigned int *request_count,
+                           struct request **same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
@@ -1541,8 +1542,10 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                int el_ret;
 
-               if (rq->q == q)
+               if (rq->q == q) {
                        (*request_count)++;
+                       *same_queue_rq = rq;
+               }
 
                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
                        continue;
@@ -1583,6 +1586,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
        struct request *req;
        unsigned int request_count = 0;
+       struct request *same_queue_rq;
 
        /*
         * low level driver can indicate that it wants pages above a
@@ -1607,7 +1611,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
         * any locks.
         */
        if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return;
 
        spin_lock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5545828..c45739c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1269,6 +1269,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
        struct request *rq;
        unsigned int request_count = 0;
        struct blk_plug *plug;
+       struct request *same_queue_rq = NULL;
 
        blk_queue_bounce(q, &bio);
 
@@ -1278,7 +1279,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
        }
 
        if (likely(!is_flush_fua) && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return;
 
        rq = blk_mq_map_request(q, bio, &data);
@@ -1308,9 +1309,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 * issued. So the plug list will have one request at most
                 */
                if (plug) {
-                       if (!list_empty(&plug->mq_list)) {
-                               old_rq = list_first_entry(&plug->mq_list,
-                                       struct request, queuelist);
+                       if (same_queue_rq) {
+                               old_rq = same_queue_rq;
                                list_del_init(&old_rq->queuelist);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
@@ -1350,6 +1350,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
        unsigned int request_count = 0;
        struct blk_map_ctx data;
        struct request *rq;
+       struct request *same_queue_rq;
 
        blk_queue_bounce(q, &bio);
 
@@ -1359,7 +1360,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
        }
 
        if (likely(!is_flush_fua) && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count))
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                return;
 
        rq = blk_mq_map_request(q, bio, &data);
diff --git a/block/blk.h b/block/blk.h
index 43b0361..aa8633c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           unsigned int *request_count);
+                           unsigned int *request_count,
+                           struct request **same_queue_rq);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
-- 
1.8.1
