This is in preparation for allowing the tags, and thus request
allocation, to be shared between multiple queues.
Also remove blk_mq_tag_to_rq, as it was unused and thus untestable. If we
need it back it can easily be re-added as a non-inline function.
Note that we also now fail queue initialization outright if we can't
allocate tags - keeping track of a reduced queue_depth over a more complex
call chain isn't easily possible, and this shouldn't happen on any of today's
systems.
Signed-off-by: Christoph Hellwig h...@lst.de
---
block/blk-mq-tag.c | 13
block/blk-mq.c | 84 +---
block/blk-mq.h | 18 +++
include/linux/blk-mq.h |8 -
4 files changed, 61 insertions(+), 62 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 83ae96c..108f82b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -7,19 +7,6 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
-/*
- * Per tagged queue (tag address space) map
- */
-struct blk_mq_tags {
- unsigned int nr_tags;
- unsigned int nr_reserved_tags;
- unsigned int nr_batch_move;
- unsigned int nr_max_cache;
-
- struct percpu_ida free_tags;
- struct percpu_ida reserved_tags;
-};
-
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ec0c276..f1b5d52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -81,7 +81,7 @@ static struct request *__blk_mq_alloc_request(struct
blk_mq_hw_ctx *hctx,
tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
if (tag != BLK_MQ_TAG_FAIL) {
- rq = hctx->rqs[tag];
+ rq = hctx->tags->rqs[tag];
blk_rq_init(hctx->queue, rq);
rq->tag = tag;
@@ -406,7 +406,9 @@ static void blk_mq_timeout_check(void *__data, unsigned
long *free_tags)
if (tag >= hctx->queue_depth)
break;
- rq = hctx->rqs[tag++];
+ rq = hctx->tags->rqs[tag++];
+ if (rq->q != hctx->queue)
+ continue;
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
continue;
@@ -993,7 +995,7 @@ static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx
*hctx,
int ret = 0;
for (i = 0; i < hctx->queue_depth; i++) {
- struct request *rq = hctx->rqs[i];
+ struct request *rq = hctx->tags->rqs[i];
ret = init(data, hctx, rq, i);
if (ret)
@@ -1030,7 +1032,7 @@ static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx
*hctx,
unsigned int i;
for (i = 0; i < hctx->queue_depth; i++) {
- struct request *rq = hctx->rqs[i];
+ struct request *rq = hctx->tags->rqs[i];
free(data, hctx, rq, i);
}
@@ -1049,20 +1051,19 @@ void blk_mq_free_commands(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_free_commands);
-static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
+static void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
struct page *page;
- while (!list_empty(&hctx->page_list)) {
- page = list_first_entry(&hctx->page_list, struct page, lru);
+ while (!list_empty(&tags->page_list)) {
+ page = list_first_entry(&tags->page_list, struct page, lru);
list_del_init(&page->lru);
__free_pages(page, page->private);
}
- kfree(hctx->rqs);
+ kfree(tags->rqs);
- if (hctx->tags)
- blk_mq_free_tags(hctx->tags);
+ blk_mq_free_tags(tags);
}
static size_t order_to_size(unsigned int order)
@@ -1075,28 +1076,35 @@ static size_t order_to_size(unsigned int order)
return ret;
}
-static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
- unsigned int reserved_tags, int node)
+static struct blk_mq_tags *blk_mq_init_rq_map(unsigned int total_tags,
+ unsigned int reserved_tags, unsigned int cmd_size, int node)
{
+ struct blk_mq_tags *tags;
unsigned int i, j, entries_per_page, max_order = 4;
size_t rq_size, left;
- INIT_LIST_HEAD(&hctx->page_list);
+ tags = blk_mq_init_tags(total_tags, reserved_tags, node);
+ if (!tags)
+ return NULL;
+
+ INIT_LIST_HEAD(&tags->page_list);
- hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *),
+ tags->rqs = kmalloc_node(total_tags * sizeof(struct request *),
GFP_KERNEL, node);
- if (!hctx->rqs)
- return -ENOMEM;
+ if (!tags->rqs) {
+ blk_mq_free_tags(tags);
+ return NULL;
+ }
/*
* rq_size is the size of the request plus driver payload, rounded
* to the cacheline size
*/
- rq_size = round_up(sizeof(struct