Limit the number of hw queues polled in each blk_poll() to at most 8,
to avoid adding extra latency when queue depth is high.

Signed-off-by: Ming Lei <ming....@redhat.com>
---
 block/blk-mq.c | 66 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 20 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f26950a51f4a..9c94b7f0bf4b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3870,33 +3870,31 @@ static blk_qc_t bio_get_poll_cookie(struct bio *bio)
        return bio->bi_iter.bi_private_data;
 }
 
-static int blk_mq_poll_io(struct bio *bio)
+#define POLL_HCTX_MAX_CNT 8
+
+static bool blk_add_unique_hctx(struct blk_mq_hw_ctx **data, int *cnt,
+               struct blk_mq_hw_ctx *hctx)
 {
-       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-       blk_qc_t cookie = bio_get_poll_cookie(bio);
-       int ret = 0;
+       int i;
 
-       if (!bio_flagged(bio, BIO_DONE) && blk_qc_t_valid(cookie)) {
-               struct blk_mq_hw_ctx *hctx =
-                       q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+       for (i = 0; i < *cnt; i++) {
+               if (data[i] == hctx)
+                       goto exit;
+       }
 
-               ret += blk_mq_poll_hctx(q, hctx);
+       if (i < POLL_HCTX_MAX_CNT) {
+               data[i] = hctx;
+               (*cnt)++;
        }
-       return ret;
+ exit:
+       return *cnt == POLL_HCTX_MAX_CNT;
 }
 
-static int blk_bio_poll_and_end_io(struct request_queue *q,
-               struct blk_bio_poll_ctx *poll_ctx)
+static void blk_build_poll_queues(struct blk_bio_poll_ctx *poll_ctx,
+               struct blk_mq_hw_ctx **data, int *cnt)
 {
-       int ret = 0;
        int i;
 
-       /*
-        * Poll hw queue first.
-        *
-        * TODO: limit max poll times and make sure to not poll same
-        * hw queue one more time.
-        */
        for (i = 0; i < poll_ctx->pq->max_nr_grps; i++) {
                struct bio_grp_list_data *grp = &poll_ctx->pq->head[i];
                struct bio *bio;
@@ -3904,9 +3902,37 @@ static int blk_bio_poll_and_end_io(struct request_queue *q,
                if (bio_grp_list_grp_empty(grp))
                        continue;
 
-               for (bio = grp->list.head; bio; bio = bio->bi_poll)
-                       ret += blk_mq_poll_io(bio);
+               for (bio = grp->list.head; bio; bio = bio->bi_poll) {
+                       blk_qc_t  cookie;
+                       struct blk_mq_hw_ctx *hctx;
+                       struct request_queue *q;
+
+                       if (bio_flagged(bio, BIO_DONE))
+                               continue;
+                       cookie = bio_get_poll_cookie(bio);
+                       if (!blk_qc_t_valid(cookie))
+                               continue;
+
+                       q = bio->bi_bdev->bd_disk->queue;
+                       hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+                       if (blk_add_unique_hctx(data, cnt, hctx))
+                               return;
+               }
        }
+}
+
+static int blk_bio_poll_and_end_io(struct request_queue *q,
+               struct blk_bio_poll_ctx *poll_ctx)
+{
+       int ret = 0;
+       int i;
+       struct blk_mq_hw_ctx *hctx[POLL_HCTX_MAX_CNT];
+       int cnt = 0;
+
+       blk_build_poll_queues(poll_ctx, hctx, &cnt);
+
+       for (i = 0; i < cnt; i++)
+               ret += blk_mq_poll_hctx(hctx[i]->queue, hctx[i]);
 
        /* reap bios */
        for (i = 0; i < poll_ctx->pq->max_nr_grps; i++) {
-- 
2.29.2

--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to