If bio_split() isn't involved, it is a bit overkill to link the dm_io into an
hlist, given there is only a single dm_io in the list. Convert to a singly
linked list for holding all dm_io instances associated with this bio.

Signed-off-by: Ming Lei <ming....@redhat.com>
---
 drivers/md/dm-core.h |  2 +-
 drivers/md/dm.c      | 46 +++++++++++++++++++++++---------------------
 2 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 811c0ccbc63d..7f51957913e8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -257,7 +257,7 @@ struct dm_io {
        spinlock_t lock;
        unsigned long start_time;
        void *data;
-       struct hlist_node node;
+       struct dm_io *next;
        struct task_struct *map_task;
        struct dm_stats_aux stats_aux;
        /* last member of dm_target_io is 'struct bio' */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2987f7cf7b47..db23efd6bbf6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1492,7 +1492,7 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 }
 
 /*
- * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * Reuse ->bi_private as dm_io list head for storing all dm_io instances
  * associated with this bio, and this bio's bi_private needs to be
  * stored in dm_io->data before the reuse.
  *
@@ -1500,14 +1500,14 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
  * touch it after splitting. Meantime it won't be changed by anyone after
  * bio is submitted. So this reuse is safe.
  */
-static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+static inline struct dm_io **dm_poll_list_head(struct bio *bio)
 {
-       return (struct hlist_head *)&bio->bi_private;
+       return (struct dm_io **)&bio->bi_private;
 }
 
 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 {
-       struct hlist_head *head = dm_get_bio_hlist_head(bio);
+       struct dm_io **head = dm_poll_list_head(bio);
 
        if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
                bio->bi_opf |= REQ_DM_POLL_LIST;
@@ -1517,19 +1517,20 @@ static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
                 */
                io->data = bio->bi_private;
 
-               INIT_HLIST_HEAD(head);
-
                /* tell block layer to poll for completion */
                bio->bi_cookie = ~BLK_QC_T_NONE;
+
+               io->next = NULL;
        } else {
                /*
                 * bio recursed due to split, reuse original poll list,
                 * and save bio->bi_private too.
                 */
-               io->data = hlist_entry(head->first, struct dm_io, node)->data;
+               io->data = (*head)->data;
+               io->next = *head;
        }
 
-       hlist_add_head(&io->node, head);
+       *head = io;
 }
 
 /*
@@ -1682,18 +1683,16 @@ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
                       unsigned int flags)
 {
-       struct hlist_head *head = dm_get_bio_hlist_head(bio);
-       struct hlist_head tmp = HLIST_HEAD_INIT;
-       struct hlist_node *next;
-       struct dm_io *io;
+       struct dm_io **head = dm_poll_list_head(bio);
+       struct dm_io *list = *head;
+       struct dm_io *tmp = NULL;
+       struct dm_io *curr, *next;
 
        /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
        if (!(bio->bi_opf & REQ_DM_POLL_LIST))
                return 0;
 
-       WARN_ON_ONCE(hlist_empty(head));
-
-       hlist_move_list(head, &tmp);
+       WARN_ON_ONCE(!list);
 
        /*
         * Restore .bi_private before possibly completing dm_io.
@@ -1704,24 +1703,27 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
         * clearing REQ_DM_POLL_LIST here.
         */
        bio->bi_opf &= ~REQ_DM_POLL_LIST;
-       bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+       bio->bi_private = list->data;
 
-       hlist_for_each_entry_safe(io, next, &tmp, node) {
-               if (dm_poll_dm_io(io, iob, flags)) {
-                       hlist_del_init(&io->node);
+       for (curr = list, next = curr->next; curr; curr = next, next =
+                       curr ? curr->next : NULL) {
+               if (dm_poll_dm_io(curr, iob, flags)) {
                        /*
                         * clone_endio() has already occurred, so passing
                         * error as 0 here doesn't override io->status
                         */
-                       dm_io_dec_pending(io, 0);
+                       dm_io_dec_pending(curr, 0);
+               } else {
+                       curr->next = tmp;
+                       tmp = curr;
                }
        }
 
        /* Not done? */
-       if (!hlist_empty(&tmp)) {
+       if (tmp) {
                bio->bi_opf |= REQ_DM_POLL_LIST;
                /* Reset bio->bi_private to dm_io list head */
-               hlist_move_list(&tmp, head);
+               *head = tmp;
                return 0;
        }
        return 1;
-- 
2.31.1

--
dm-devel mailing list
dm-devel@redhat.com
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to