On 3/5/21 7:14 AM, John Garry wrote:
> diff --git a/block/blk.h b/block/blk.h
> index 3b53e44b967e..1a948bfd91e4 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -201,10 +201,29 @@ void elv_unregister_queue(struct request_queue *q);
>  static inline void elevator_exit(struct request_queue *q,
>  		struct elevator_queue *e)
>  {
> +	struct blk_mq_tag_set *set = q->tag_set;
> +	struct request_queue *tmp;
> +
>  	lockdep_assert_held(&q->sysfs_lock);
>  
> +	mutex_lock(&set->tag_list_lock);
> +	list_for_each_entry(tmp, &set->tag_list, tag_set_list) {
> +		if (tmp == q)
> +			continue;
> +		blk_mq_freeze_queue(tmp);
> +		blk_mq_quiesce_queue(tmp);
> +	}
> +
>  	blk_mq_sched_free_requests(q);
>  	__elevator_exit(q, e);
> +
> +	list_for_each_entry(tmp, &set->tag_list, tag_set_list) {
> +		if (tmp == q)
> +			continue;
> +		blk_mq_unquiesce_queue(tmp);
> +		blk_mq_unfreeze_queue(tmp);
> +	}
> +	mutex_unlock(&set->tag_list_lock);
>  }
This patch introduces nesting of tag_list_lock inside sysfs_lock. The
latter is per request queue while the former can be shared across
multiple request queues. Has it been analyzed whether this nesting is
safe?
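To make the question concrete, below is a minimal userspace model (pthreads,
not kernel code) of the lock inversion I am worried about. Thread A models
elevator_exit() as modified by this patch (per-queue sysfs_lock taken first,
then the shared tag_list_lock). Thread B models a hypothetical path that
iterates the tag set under tag_list_lock and then acquires a member queue's
sysfs_lock; whether such a path exists or can be added later is exactly what
I am asking about. The names in the sketch are placeholders, not real kernel
functions.

/* Userspace model of the suspected lock-order inversion; not kernel code.
 * Build with: cc -pthread abba.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Per-queue lock (models q->sysfs_lock for one queue). */
static pthread_mutex_t sysfs_lock_q1 = PTHREAD_MUTEX_INITIALIZER;
/* Lock shared across all queues of a tag set (models set->tag_list_lock). */
static pthread_mutex_t tag_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models elevator_exit() after this patch: sysfs_lock -> tag_list_lock. */
static void *elevator_exit_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sysfs_lock_q1);
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&tag_list_lock);
	puts("elevator_exit path acquired both locks");
	pthread_mutex_unlock(&tag_list_lock);
	pthread_mutex_unlock(&sysfs_lock_q1);
	return NULL;
}

/* Models a hypothetical path that walks the tag set and then touches one of
 * its queues under that queue's sysfs_lock: tag_list_lock -> sysfs_lock. */
static void *tag_set_iteration_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&tag_list_lock);
	sleep(1);
	pthread_mutex_lock(&sysfs_lock_q1);
	puts("tag set iteration path acquired both locks");
	pthread_mutex_unlock(&sysfs_lock_q1);
	pthread_mutex_unlock(&tag_list_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, elevator_exit_path, NULL);
	pthread_create(&b, NULL, tag_set_iteration_path, NULL);
	/* If both orderings can run concurrently, neither join returns. */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

If both acquisition orders are reachable, the two threads above hang, which
is the circular dependency lockdep would complain about. If the reverse
order is impossible in the current tree, it would help to state why in the
commit message.

Thanks,

Bart.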