On Fri, 16 May 2025, Mikulas Patocka wrote:
>
>
> On Thu, 15 May 2025, Benjamin Marzinski wrote:
>
> > @@ -2077,35 +2095,55 @@ static int probe_path(struct pgpath *pgpath)
> > static int probe_active_paths(struct multipath *m)
> > {
> > struct pgpath *pgpath;
> > - struct priority_group *pg;
> > + struct priority_group *pg = NULL;
> > unsigned long flags;
> > int r = 0;
> >
> > - mutex_lock(&m->work_mutex);
> > -
> > spin_lock_irqsave(&m->lock, flags);
>
> Hi
>
> I suggest replacing spin_lock_irqsave/spin_unlock_irqrestore with
> spin_lock_irq/spin_unlock_irq here and in some other places where it is
> known that interrupts are enabled (for example __map_bio,
> process_queued_bios, multipath_ctr, flush_multipath_work,
> multipath_resume, multipath_status, multipath_prepare_ioctl, ...).
>
> I accepted this patch, so you can send the spinlock changes in a follow-up
> patch.
>
> Mikulas
Hi

I've created a patch for this. Please test it (with spinlock debugging
enabled); if it passes the tests, I'll stage it.

Mikulas
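For context, the difference between the two APIs, as a minimal sketch
(illustrative only, not code taken from dm-mpath.c):

	/*
	 * spin_lock_irqsave() saves the caller's interrupt state in
	 * 'flags' and disables local interrupts; the matching
	 * spin_unlock_irqrestore() restores exactly the saved state.
	 * This is safe regardless of whether interrupts were enabled
	 * or disabled on entry.
	 */
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&m->lock, flags);

	/*
	 * spin_lock_irq() disables local interrupts unconditionally and
	 * spin_unlock_irq() re-enables them unconditionally.  It avoids
	 * the save/restore, but it is only correct when interrupts are
	 * known to be enabled on entry; otherwise the unlock would turn
	 * interrupts back on behind the caller's back.
	 */
	spin_lock_irq(&m->lock);
	/* ... critical section ... */
	spin_unlock_irq(&m->lock);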
From: Mikulas Patocka <[email protected]>
Replace spin_lock_irqsave/spin_unlock_irqrestore with
spin_lock_irq/spin_unlock_irq at places where it is known that interrupts
are enabled.
Signed-off-by: Mikulas Patocka <[email protected]>
---
drivers/md/dm-mpath.c | 61 ++++++++++++++++++++------------------------------
1 file changed, 25 insertions(+), 36 deletions(-)
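If there is any doubt whether one of these call sites can be reached with
interrupts disabled, a temporary assertion of the precondition can help
catch it during the requested testing. A minimal sketch, not part of the
patch itself:

	/*
	 * Debugging aid: spin_lock_irq() assumes interrupts are enabled
	 * on entry.  Warn once at runtime if that assumption is ever
	 * violated at this call site.
	 */
	WARN_ON_ONCE(irqs_disabled());
	spin_lock_irq(&m->lock);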
Index: linux-2.6/drivers/md/dm-mpath.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-mpath.c
+++ linux-2.6/drivers/md/dm-mpath.c
@@ -612,7 +612,6 @@ static void multipath_queue_bio(struct m
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
struct pgpath *pgpath;
- unsigned long flags;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
@@ -620,12 +619,12 @@ static struct pgpath *__map_bio(struct m
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if (!pgpath) {
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
__multipath_queue_bio(m, bio);
pgpath = ERR_PTR(-EAGAIN);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
@@ -688,7 +687,6 @@ static void process_queued_io_list(struc
static void process_queued_bios(struct work_struct *work)
{
int r;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -697,16 +695,16 @@ static void process_queued_bios(struct w
bio_list_init(&bios);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (bio_list_empty(&m->queued_bios)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
return;
}
bio_list_merge_init(&bios, &m->queued_bios);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -1190,7 +1188,6 @@ static int multipath_ctr(struct dm_targe
struct dm_arg_set as;
unsigned int pg_count = 0;
unsigned int next_pg_num;
- unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1255,9 +1252,9 @@ static int multipath_ctr(struct dm_targe
goto bad;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
@@ -1292,23 +1289,21 @@ static void multipath_wait_for_pg_init_c
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- unsigned long flags;
-
if (!atomic_read(&m->pg_init_in_progress))
goto skip;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (atomic_read(&m->pg_init_in_progress) &&
!test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
@@ -1370,11 +1365,10 @@ out:
static int reinstate_path(struct pgpath *pgpath)
{
int r = 0, run_queue = 0;
- unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned int nr_valid_paths;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (pgpath->is_active)
goto out;
@@ -1404,7 +1398,7 @@ static int reinstate_path(struct pgpath
schedule_work(&m->trigger_event);
out:
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
@@ -1461,7 +1455,6 @@ static int switch_pg_num(struct multipat
{
struct priority_group *pg;
unsigned int pgnum;
- unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1470,7 +1463,7 @@ static int switch_pg_num(struct multipat
return -EINVAL;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
list_for_each_entry(pg, &m->priority_groups, list) {
pg->bypassed = false;
if (--pgnum)
@@ -1480,7 +1473,7 @@ static int switch_pg_num(struct multipat
m->current_pg = NULL;
m->next_pg = pg;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
schedule_work(&m->trigger_event);
return 0;
@@ -1762,9 +1755,8 @@ static void multipath_postsuspend(struct
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
@@ -1775,7 +1767,7 @@ static void multipath_resume(struct dm_t
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
/*
@@ -1798,14 +1790,13 @@ static void multipath_status(struct dm_t
unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
- unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned int pg_num;
char state;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
/* Features */
if (type == STATUSTYPE_INFO)
@@ -1951,7 +1942,7 @@ static void multipath_status(struct dm_t
break;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
@@ -1961,7 +1952,6 @@ static int multipath_message(struct dm_t
dev_t dev;
struct multipath *m = ti->private;
action_fn action;
- unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1973,9 +1963,9 @@ static int multipath_message(struct dm_t
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false, __func__);
@@ -2026,7 +2016,6 @@ static int multipath_prepare_ioctl(struc
{
struct multipath *m = ti->private;
struct pgpath *pgpath;
- unsigned long flags;
int r;
pgpath = READ_ONCE(m->current_pgpath);
@@ -2044,10 +2033,10 @@ static int multipath_prepare_ioctl(struc
} else {
/* No path is available */
r = -EIO;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
if (r == -ENOTCONN) {
@@ -2055,10 +2044,10 @@ static int multipath_prepare_ioctl(struc
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
(void) __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}