Drop the unused q argument, and invert the check so that the exceptional
case is handled in a branch and the regular path becomes the normal return.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Chaitanya Kulkarni <k...@nvidia.com>
Reviewed-by: Damien Le Moal <damien.lem...@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumsh...@wdc.com>
---
 block/blk-core.c  |  2 +-
 block/blk-merge.c |  2 +-
 block/blk-mq.c    |  2 +-
 block/blk-mq.h    | 18 ++++++++----------
 4 files changed, 11 insertions(+), 13 deletions(-)
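
For reference, the resulting blk_mq_plug() after this patch reads as
follows; this just restates the blk-mq.h hunk below, with the zoned-device
write case handled by an early return and the common case falling through
to the plain return:

static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}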

diff --git a/block/blk-core.c b/block/blk-core.c
index 6bcca0b686de4..bc16e9bae2dc4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -719,7 +719,7 @@ void submit_bio_noacct(struct bio *bio)
 
        might_sleep();
 
-       plug = blk_mq_plug(q, bio);
+       plug = blk_mq_plug(bio);
        if (plug && plug->nowait)
                bio->bi_opf |= REQ_NOWAIT;
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f5f42ebd0bb0..5abf5aa5a5f0e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1051,7 +1051,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        struct blk_plug *plug;
        struct request *rq;
 
-       plug = blk_mq_plug(q, bio);
+       plug = blk_mq_plug(bio);
        if (!plug || rq_list_empty(plug->mq_list))
                return false;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 15c7c5c4ad222..dc714dff73001 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2804,7 +2804,7 @@ static void bio_set_ioprio(struct bio *bio)
 void blk_mq_submit_bio(struct bio *bio)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       struct blk_plug *plug = blk_mq_plug(q, bio);
+       struct blk_plug *plug = blk_mq_plug(bio);
        const int is_sync = op_is_sync(bio->bi_opf);
        struct request *rq;
        unsigned int nr_segs = 1;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 31d75a83a562d..e694ec67d646a 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -294,7 +294,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 
 /*
  * blk_mq_plug() - Get caller context plug
- * @q: request queue
  * @bio : the bio being submitted by the caller context
  *
  * Plugging, by design, may delay the insertion of BIOs into the elevator in
@@ -305,23 +304,22 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
  * order. While this is not a problem with regular block devices, this ordering
  * change can cause write BIO failures with zoned block devices as these
  * require sequential write patterns to zones. Prevent this from happening by
- * ignoring the plug state of a BIO issuing context if the target request queue
- * is for a zoned block device and the BIO to plug is a write operation.
+ * ignoring the plug state of a BIO issuing context if the BIO targets a zoned
+ * block device and the BIO to plug is a write operation.
  *
  * Return current->plug if the bio can be plugged and NULL otherwise
  */
-static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
-                                          struct bio *bio)
+static inline struct blk_plug *blk_mq_plug(struct bio *bio)
 {
+       /* Zoned block device write operation case: do not plug the BIO */
+       if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
+               return NULL;
+
        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
-       if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
-               return current->plug;
-
-       /* Zoned block device write operation case: do not plug the BIO */
-       return NULL;
+       return current->plug;
 }
 
 /* Free all requests on the list */
-- 
2.30.2
