It is not necessary to issue requests directly with bypass set to
'true' in blk_mq_sched_insert_requests and then handle the non-issued
requests ourselves. Just set bypass to 'false' and let
blk_mq_try_issue_directly handle them entirely. Also remove the
blk_rq_can_direct_dispatch check, because blk_mq_try_issue_directly
handles that case as well. If a request fails to be issued directly,
insert the rest of the list through the scheduler.

Signed-off-by: Jianchao Wang <jianchao.w.w...@oracle.com>
---
 block/blk-mq-sched.c |  8 +++-----
 block/blk-mq.c       | 29 ++++++++++-------------------
 2 files changed, 13 insertions(+), 24 deletions(-)
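
Note for reviewers (below the '---', so not part of the commit
message): the change relies on the bypass semantics of
blk_mq_try_issue_directly. With bypass == false the function disposes
of a non-issued request itself, either requeueing it or failing it,
and only reports the status back; with bypass == true it hands the
request back to the caller. A rough sketch of that contract follows,
with the internals heavily simplified rather than copied from
blk-mq.c:

/*
 * Rough sketch of the bypass contract, for review context only.
 * __blk_mq_issue_directly() stands in for the real issue path;
 * hctx locking, quiesce and stopped-queue checks are omitted.
 */
static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie, bool bypass, bool last)
{
	blk_status_t ret = __blk_mq_issue_directly(hctx, rq, cookie, last);

	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		if (bypass)
			break;	/* bypass == true: hand rq back to caller */
		/* bypass == false: requeue rq ourselves ... */
		blk_mq_sched_insert_request(rq, false, true, false);
		break;
	default:
		if (!bypass)
			blk_mq_end_request(rq, ret);	/* ... or fail it here */
		break;
	}

	return ret;
}

This is why blk_mq_try_issue_list_directly below can simply stop
direct issue on the first non-OK status and push the remainder of the
list through blk_mq_sched_insert_request.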

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f096d898..5b4d52d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -417,12 +417,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                 * busy in case of 'none' scheduler, and this way may save
                 * us one extra enqueue & dequeue to sw queue.
                 */
-               if (!hctx->dispatch_busy && !e && !run_queue_async) {
+               if (!hctx->dispatch_busy && !e && !run_queue_async)
                        blk_mq_try_issue_list_directly(hctx, list);
-                       if (list_empty(list))
-                               return;
-               }
-               blk_mq_insert_requests(hctx, ctx, list);
+               else
+                       blk_mq_insert_requests(hctx, ctx, list);
        }
 
        blk_mq_run_hw_queue(hctx, run_queue_async);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 88ee447..3265e30 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1904,32 +1904,23 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
+       blk_qc_t unused;
+       blk_status_t ret = BLK_STS_OK;
+
        while (!list_empty(list)) {
-               blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
-               if (!blk_rq_can_direct_dispatch(rq))
-                       break;
-
                list_del_init(&rq->queuelist);
-               ret = blk_mq_request_issue_directly(rq, list_empty(list));
-               if (ret != BLK_STS_OK) {
-                       if (ret == BLK_STS_RESOURCE ||
-                                       ret == BLK_STS_DEV_RESOURCE) {
-                               list_add(&rq->queuelist, list);
-                               break;
-                       }
-                       blk_mq_end_request(rq, ret);
-               }
+               if (ret == BLK_STS_OK)
+                       ret = blk_mq_try_issue_directly(hctx, rq, &unused,
+                                                       false,
+                                                       list_empty(list));
+               else
+                       blk_mq_sched_insert_request(rq, false, true, false);
        }
 
-       /*
-        * If we didn't flush the entire list, we could have told
-        * the driver there was more coming, but that turned out to
-        * be a lie.
-        */
-       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
+       if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
-- 
2.7.4
