On legacy request_queues, request allocation through request_list associates each request with its blkcg_gq. However, blk-mq doesn't use request_list and its requests aren't associated with the matching blkcg_gqs, preventing cgroup-aware tracking of blk-mq IOs.
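(For context, the legacy association falls out of request_list selection. A simplified, illustrative sketch of that existing allocation path, not part of this patch:

	rl = blk_get_rl(q, bio);	/* rl->blkg is the matching blkcg_gq */
	rq = __get_request(rl, ...);	/* legacy request allocation */
	blk_rq_set_rl(rq, rl);		/* done inside __get_request() */

blk-mq has no equivalent step, which is what the helpers added below provide.)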
This patch adds blk_rq_[dis]associate_blkg() and uses them in the blk-mq request alloc/free paths so that every request is associated with its blkcg_gq while in flight. The added overhead is minimal and per-cpu in hot paths. In the future, we should also be able to remove the more frequent per-bio blkcg_gq operations in blkcg_bio_issue_check() for blk-mq.

* ->blkcg is added to blk_mq_alloc_data to carry the target blkcg in the allocation path. I didn't #ifdef it for code simplicity. The only overhead when cgroup is disabled is the extra pointer in the data structure, which shouldn't matter.

* Moved the set_start_time_ns() call out of the CONFIG_BLK_CGROUP block together with blk_rq_associate_blkg(). Both functions provide dummy implementations when !CONFIG_BLK_CGROUP.

Signed-off-by: Tejun Heo <t...@kernel.org>
---
 block/blk-mq.c             |  9 ++++++---
 block/blk-mq.h             |  1 +
 include/linux/blk-cgroup.h | 42 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index af958c4..7cc64de 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -305,9 +305,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->rq_disk = NULL;
 	rq->part = NULL;
 	rq->start_time = jiffies;
-#ifdef CONFIG_BLK_CGROUP
-	rq->blkg = NULL;
+
+	blk_rq_associate_blkg(rq, data->blkcg);
 	set_start_time_ns(rq);
+#ifdef CONFIG_BLK_CGROUP
 	rq->io_start_time_ns = 0;
 #endif
 	rq->nr_phys_segments = 0;
@@ -472,6 +473,8 @@ void blk_mq_free_request(struct request *rq)
 		}
 	}
 
+	blk_rq_disassociate_blkg(rq);
+
 	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
@@ -1599,7 +1602,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = op_is_flush(bio->bi_opf);
-	struct blk_mq_alloc_data data = { .flags = 0 };
+	struct blk_mq_alloc_data data = { .flags = 0, .blkcg = bio_blkcg(bio) };
 	struct request *rq;
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4933af9..dfb1e1d 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -111,6 +111,7 @@ struct blk_mq_alloc_data {
 	struct request_queue *q;
 	unsigned int flags;
 	unsigned int shallow_depth;
+	struct blkcg *blkcg;
 
 	/* input & output parameter */
 	struct blk_mq_ctx *ctx;
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1de5158..96eed0f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -725,6 +725,45 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	return !throtl;
 }
 
+/**
+ * blk_rq_associate_blkg - lookup and associate a request with its blkcg_gq
+ * @rq: request of interest
+ * @blkcg: target blkcg
+ *
+ * Associate @rq with the matching blkcg_gq of @blkcg. This is used by
+ * blk-mq to associate requests directly with blkgs without going through
+ * request_list.
+ */
+static inline void blk_rq_associate_blkg(struct request *rq, struct blkcg *blkcg)
+{
+	struct request_queue *q = rq->q;
+	struct blkcg_gq *blkg;
+
+	if (!blkcg) {
+		rq->blkg = q->root_blkg;
+		return;
+	}
+
+	rcu_read_lock();
+	blkg = blkg_lookup_create(blkcg, q);
+	if (likely(blkg == q->root_blkg || percpu_ref_tryget(&blkg->refcnt)))
+		rq->blkg = blkg;
+	else
+		rq->blkg = q->root_blkg;
+	rcu_read_unlock();
+}
+
+/**
+ * blk_rq_disassociate_blkg - undo blk_rq_associate_blkg()
+ * @rq: request of interest
+ */
+static inline void blk_rq_disassociate_blkg(struct request *rq)
+{
+	if (rq->blkg && rq->blkg != rq->q->root_blkg)
+		blkg_put(rq->blkg);
+	rq->blkg = NULL;
+}
+
 #else	/* CONFIG_BLK_CGROUP */
 
 struct blkcg {
@@ -781,6 +820,9 @@ static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
 					 struct bio *bio) { return true; }
 
+static inline void blk_rq_associate_blkg(struct request *rq, struct blkcg *blkcg) { }
+static inline void blk_rq_disassociate_blkg(struct request *rq) { }
+
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
 
--
2.9.5
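With every blk-mq request carrying its blkcg_gq while in flight, per-request cgroup attribution no longer needs to go through the bio. As a rough illustration of what the association enables (rq_in_blkcg() is a hypothetical helper, not part of this patch):

	/* hypothetical: check whether an in-flight request belongs to @blkcg */
	static inline bool rq_in_blkcg(struct request *rq, struct blkcg *blkcg)
	{
		/* rq->blkg is set at allocation and cleared at free */
		return rq->blkg && rq->blkg->blkcg == blkcg;
	}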