It is not necessary to issue request directly with bypass 'true'
in blk_mq_sched_insert_requests and handle the non-issued requests
itself. Just set bypass to 'false' and let blk_mq_try_issue_directly
handle them totally. Remove the blk_rq_can_direct_dispatch check,
because blk_mq_try_issue_directly can handle it well. If a request
fails to be issued directly, insert the rest.

Signed-off-by: Jianchao Wang <jianchao.w.w...@oracle.com>
---
 block/blk-mq-sched.c |  8 +++-----
 block/blk-mq.c       | 20 +++++++++-----------
 2 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f096d898..5b4d52d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -417,12 +417,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx 
*hctx,
                 * busy in case of 'none' scheduler, and this way may save
                 * us one extra enqueue & dequeue to sw queue.
                 */
-               if (!hctx->dispatch_busy && !e && !run_queue_async) {
+               if (!hctx->dispatch_busy && !e && !run_queue_async)
                        blk_mq_try_issue_list_directly(hctx, list);
-                       if (list_empty(list))
-                               return;
-               }
-               blk_mq_insert_requests(hctx, ctx, list);
+               else
+                       blk_mq_insert_requests(hctx, ctx, list);
        }
 
        blk_mq_run_hw_queue(hctx, run_queue_async);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 261ff6d..c663102 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1877,22 +1877,20 @@ blk_status_t blk_mq_request_issue_directly(struct 
request *rq, bool last)
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
+       blk_qc_t unused;
+       blk_status_t ret = BLK_STS_OK;
+
        while (!list_empty(list)) {
-               blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
                list_del_init(&rq->queuelist);
-               ret = blk_mq_request_issue_directly(rq, list_empty(list));
-               if (ret != BLK_STS_OK) {
-                       if (ret == BLK_STS_RESOURCE ||
-                                       ret == BLK_STS_DEV_RESOURCE) {
-                               blk_mq_request_bypass_insert(rq,
+               if (ret == BLK_STS_OK)
+                       ret = blk_mq_try_issue_directly(hctx, rq, &unused,
+                                                       false,
                                                        list_empty(list));
-                               break;
-                       }
-                       blk_mq_end_request(rq, ret);
-               }
+               else
+                       blk_mq_sched_insert_request(rq, false, true, false);
        }
 
        /*
@@ -1900,7 +1898,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx 
*hctx,
         * the driver there was more coming, but that turned out to
         * be a lie.
         */
-       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
+       if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-- 
2.7.4

Reply via email to