On 2/12/24 14:46, Christoph Hellwig wrote:
Pass a queue_limits to blk_alloc_queue and apply it after validating and
capping the values using blk_validate_limits.  This will allow allocating
queues with valid queue limits instead of setting the values one at a
time later.
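
For illustration, a hypothetical before/after sketch (driver-style code,
not from this series; the limit values are made up):

	/* before: allocate first, then set limits one call at a time */
	q = blk_alloc_queue(node);
	blk_queue_logical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 255);

	/*
	 * after: hand a populated queue_limits to the allocator, which
	 * validates and caps it before the queue is ever visible
	 */
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.max_hw_sectors		= 255,
	};
	q = blk_alloc_queue(&lim, node);
	if (IS_ERR(q))
		return PTR_ERR(q);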

Signed-off-by: Christoph Hellwig <[email protected]>
---
  block/blk-core.c | 26 ++++++++++++++++++--------
  block/blk-mq.c   |  7 ++++---
  block/genhd.c    |  5 +++--
  3 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index cb56724a8dfb25..a16b5abdbbf56f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -394,24 +394,34 @@ static void blk_timeout_work(struct work_struct *work)
  {
  }

-struct request_queue *blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
  {
        struct request_queue *q;
+       int error;

        q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
                                  node_id);
        if (!q)
-               return NULL;
+               return ERR_PTR(-ENOMEM);

        q->last_merge = NULL;

        q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
-       if (q->id < 0)
+       if (q->id < 0) {
+               error = q->id;
                goto fail_q;
+       }

        q->stats = blk_alloc_queue_stats();
-       if (!q->stats)
+       if (!q->stats) {
+               error = -ENOMEM;
                goto fail_id;
+       }
+
+       error = blk_set_default_limits(lim);
+       if (error)
+               goto fail_stats;
+       q->limits = *lim;

        q->node = node_id;

@@ -436,12 +446,12 @@ struct request_queue *blk_alloc_queue(int node_id)
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
         */
-       if (percpu_ref_init(&q->q_usage_counter,
+       error = percpu_ref_init(&q->q_usage_counter,
                                blk_queue_usage_counter_release,
-                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+       if (error)
                goto fail_stats;

-       blk_set_default_limits(&q->limits);
        q->nr_requests = BLKDEV_DEFAULT_RQ;

        return q;
@@ -452,7 +462,7 @@ struct request_queue *blk_alloc_queue(int node_id)
        ida_free(&blk_queue_ida, q->id);
  fail_q:
        kmem_cache_free(blk_requestq_cachep, q);
-       return NULL;
+       return ERR_PTR(error);
  }

/**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6d2f7b5caa01d8..9dd8055cc5246d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4086,12 +4086,13 @@ void blk_mq_release(struct request_queue *q)
  static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
                void *queuedata)
  {
+       struct queue_limits lim = { };
        struct request_queue *q;
        int ret;

-       q = blk_alloc_queue(set->numa_node);
-       if (!q)
-               return ERR_PTR(-ENOMEM);
+       q = blk_alloc_queue(&lim, set->numa_node);
+       if (IS_ERR(q))
+               return q;
        q->queuedata = queuedata;
        ret = blk_mq_init_allocated_queue(set, q);
        if (ret) {
diff --git a/block/genhd.c b/block/genhd.c
index d74fb5b4ae6818..7a8fd57c51f73c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1393,11 +1393,12 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,

struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
  {
+       struct queue_limits lim = { };
        struct request_queue *q;
        struct gendisk *disk;

-       q = blk_alloc_queue(node);
-       if (!q)
+       q = blk_alloc_queue(&lim, node);
+       if (IS_ERR(q))
                return NULL;

        disk = __alloc_disk_node(q, node, lkclass);
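
One behavioral note on the conversion above: blk_alloc_queue() now encodes
the errno in the returned pointer, so callers either propagate it (as
blk_mq_init_queue_data() does) or flatten it to NULL (as __blk_alloc_disk()
does, dropping the specific error). Roughly, as an illustrative sketch
rather than code from the patch:

	q = blk_alloc_queue(&lim, node);
	if (IS_ERR(q))
		return q;		/* keep the encoded errno */

	q = blk_alloc_queue(&lim, node);
	if (IS_ERR(q))
		return NULL;		/* NULL-on-error API loses the errno */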
Ah, here it is.

Please move the declaration from patch #4 to this one.

Cheers,

Hannes
--
Dr. Hannes Reinecke                Kernel Storage Architect
[email protected]                              +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Ivo Totev, Andrew McDonald,
Werner Knoblich

