blk_set_queue_dying() does not acquire queue lock before it calls
blk_queue_for_each_rl(). This allows a racing blkg_destroy() to
remove blkg->q_node from the linked list and have
blk_queue_for_each_rl() loop infinitely over the removed blkg->q_node
list node.

Signed-off-by: Tahsin Erdogan <tah...@google.com>
---
 block/blk-core.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 61ba08c58b64..0488a2b55bf0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -525,12 +525,14 @@ void blk_set_queue_dying(struct request_queue *q)
        else {
                struct request_list *rl;
 
+               spin_lock_irq(q->queue_lock);
                blk_queue_for_each_rl(rl, q) {
                        if (rl->rq_pool) {
                                wake_up(&rl->wait[BLK_RW_SYNC]);
                                wake_up(&rl->wait[BLK_RW_ASYNC]);
                        }
                }
+               spin_unlock_irq(q->queue_lock);
        }
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-- 
2.11.0.483.g087da7b7c-goog

Reply via email to