Ming Lei <ming....@canonical.com> writes:

> Most of the time, flush plug should be the hottest I/O path,
> so mark ctx as pending after all requests in the list are
> inserted.
>
> Signed-off-by: Ming Lei <ming....@canonical.com>

I agree this looks like a better approach.  Whether it makes a
difference or not, I can't say.

Reviewed-by: Jeff Moyer <jmo...@redhat.com>

> ---
>  block/blk-mq.c | 18 +++++++++++++-----
>  1 file changed, 13 insertions(+), 5 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index a5e33bc..b169e2d 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -990,18 +990,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, 
> unsigned long msecs)
>  }
>  EXPORT_SYMBOL(blk_mq_delay_queue);
>  
> -static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
> -                                 struct request *rq, bool at_head)
> +static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
> +                                         struct blk_mq_ctx *ctx,
> +                                         struct request *rq,
> +                                         bool at_head)
>  {
> -     struct blk_mq_ctx *ctx = rq->mq_ctx;
> -
>       trace_block_rq_insert(hctx->queue, rq);
>  
>       if (at_head)
>               list_add(&rq->queuelist, &ctx->rq_list);
>       else
>               list_add_tail(&rq->queuelist, &ctx->rq_list);
> +}
> +
> +static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
> +                                 struct request *rq, bool at_head)
> +{
> +     struct blk_mq_ctx *ctx = rq->mq_ctx;
>  
> +     __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
>       blk_mq_hctx_mark_pending(hctx, ctx);
>  }
>  
> @@ -1057,8 +1064,9 @@ static void blk_mq_insert_requests(struct request_queue 
> *q,
>               rq = list_first_entry(list, struct request, queuelist);
>               list_del_init(&rq->queuelist);
>               rq->mq_ctx = ctx;
> -             __blk_mq_insert_request(hctx, rq, false);
> +             __blk_mq_insert_req_list(hctx, ctx, rq, false);
>       }
> +     blk_mq_hctx_mark_pending(hctx, ctx);
>       spin_unlock(&ctx->lock);
>  
>       blk_mq_run_hw_queue(hctx, from_schedule);
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to