loop_clr_fd() can run piggybacked on lo_release() (the autoclear
case), and in that situation rereading the partition table always
fails because bd_mutex is already held.
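For reference, the failing call chain looks roughly like this (a
sketch; exact frames depend on the kernel version, but the release
path takes bd_mutex before calling the driver's ->release(), and the
old BLKRRPART ioctl path only trylocks it):

    blkdev_put()
      __blkdev_put()
        mutex_lock(&bdev->bd_mutex)
        lo_release()
          loop_clr_fd()                        /* LO_FLAGS_AUTOCLEAR */
            ioctl_by_bdev(bdev, BLKRRPART, 0)
              blkdev_reread_part()
                mutex_trylock(&bdev->bd_mutex) /* fails -> -EBUSY */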

This patch detects that situation via the reference count and calls
__blkdev_reread_part() to avoid acquiring the lock a second time.

At the same time, this patch switches over to the new kernel APIs
blkdev_reread_part() and __blkdev_reread_part().
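The locked variant is essentially a bd_mutex-taking wrapper around the
lock-free one; roughly (a sketch of the helpers introduced earlier in
this series):

    /* caller must hold bdev->bd_mutex */
    int __blkdev_reread_part(struct block_device *bdev);

    int blkdev_reread_part(struct block_device *bdev)
    {
            int res;

            mutex_lock(&bdev->bd_mutex);
            res = __blkdev_reread_part(bdev);
            mutex_unlock(&bdev->bd_mutex);
            return res;
    }

So loop_reread_partitions() below picks __blkdev_reread_part() when
lo_refcnt has dropped to zero (i.e. we are on the release path and
bd_mutex is already held), and blkdev_reread_part() otherwise.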

Reviewed-by: Christoph Hellwig <[email protected]>
Tested-by: Jarod Wilson <[email protected]>
Acked-by: Jarod Wilson <[email protected]>
Signed-off-by: Jarod Wilson <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
 drivers/block/loop.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b3e294e..2b99e34 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -474,6 +474,28 @@ static int loop_flush(struct loop_device *lo)
        return loop_switch(lo, NULL);
 }
 
+static void loop_reread_partitions(struct loop_device *lo,
+                                  struct block_device *bdev)
+{
+       int rc;
+
+       /*
+        * bd_mutex is already held in the release path, so don't
+        * acquire it again when this function is called from there.
+        *
+        * If the reread isn't triggered from the release path,
+        * lo_refcnt must be at least one, and it can only drop to
+        * zero once the current holder is released.
+        */
+       if (!atomic_read(&lo->lo_refcnt))
+               rc = __blkdev_reread_part(bdev);
+       else
+               rc = blkdev_reread_part(bdev);
+       if (rc)
+               pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
+                       __func__, lo->lo_number, lo->lo_file_name, rc);
+}
+
 /*
  * loop_change_fd switched the backing store of a loopback device to
  * a new file. This is useful for operating system installers to free up
@@ -522,7 +544,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 
        fput(old_file);
        if (lo->lo_flags & LO_FLAGS_PARTSCAN)
-               ioctl_by_bdev(bdev, BLKRRPART, 0);
+               loop_reread_partitions(lo, bdev);
        return 0;
 
  out_putf:
@@ -759,7 +781,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        if (part_shift)
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        if (lo->lo_flags & LO_FLAGS_PARTSCAN)
-               ioctl_by_bdev(bdev, BLKRRPART, 0);
+               loop_reread_partitions(lo, bdev);
 
        /* Grab the block_device to prevent its destruction after we
         * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
@@ -877,7 +899,7 @@ static int loop_clr_fd(struct loop_device *lo)
        blk_mq_unfreeze_queue(lo->lo_queue);
 
        if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-               ioctl_by_bdev(bdev, BLKRRPART, 0);
+               loop_reread_partitions(lo, bdev);
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
@@ -954,7 +976,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
             !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
                lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
-               ioctl_by_bdev(lo->lo_device, BLKRRPART, 0);
+               loop_reread_partitions(lo, lo->lo_device);
        }
 
        lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
-- 
1.9.1

