If the backing device for a loop device is a block device, then
mirror the discard properties of the underlying block device into
the loop device. While in there, differentiate between
REQ_OP_DISCARD and REQ_OP_WRITE_ZEROES, which are distinct
operations for block devices, but which the loop device had
previously been lumping together.
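
For reference, the distinction maps onto the two fallocate() modes the
driver now uses for a file-backed loop device. The userspace sketch
below is only illustrative and not part of this patch; the backing
file name is made up.

/*
 * Illustrative only: the two fallocate() modes that REQ_OP_DISCARD
 * and REQ_OP_WRITE_ZEROES now map to for a file-backed loop device.
 * "backing.img" is a placeholder name.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("backing.img", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* REQ_OP_DISCARD: deallocate the range, keep the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4096) < 0)
		perror("punch hole");

	/* REQ_OP_WRITE_ZEROES: zero the range, keeping it allocated. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      4096, 4096) < 0)
		perror("zero range");

	close(fd);
	return 0;
}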

This change fixes blktests block/003, and removes an extraneous
error print in block/013 when testing on a loop device backed
by a block device that does not support discard.

Signed-off-by: Evan Green <evgr...@chromium.org>
---
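
A quick way to eyeball the mirrored limits outside of blktests is to
compare the loop device's discard attributes in sysfs against the
backing device's. The sketch below is just a verification aid, not
part of the patch; it assumes a loop device ("loop0" here is a
placeholder) has already been attached to a backing block device
with losetup.

/*
 * Print the discard-related queue limits of a block device from
 * sysfs. "loop0" is a placeholder; pass a different name as argv[1].
 */
#include <stdio.h>

static void print_limit(const char *dev, const char *attr)
{
	char path[256];
	char value[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "r");
	if (f && fgets(value, sizeof(value), f))
		printf("%s/%s: %s", dev, attr, value); /* value keeps its '\n' */
	else
		printf("%s/%s: <unreadable>\n", dev, attr);
	if (f)
		fclose(f);
}

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "loop0";

	print_limit(dev, "discard_max_bytes");
	print_limit(dev, "discard_granularity");
	print_limit(dev, "write_zeroes_max_bytes");
	return 0;
}

Run it once against the loop device and once against the backing
device; with this patch the values should match when the backing
store is a block device.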

 drivers/block/loop.c | 61 +++++++++++++++++++++++++++++---------------
 1 file changed, 41 insertions(+), 20 deletions(-)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a1ba555e3b92..8c2b6885db56 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -417,19 +417,14 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
        return ret;
 }
 
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_discard(struct loop_device *lo, struct request *rq,
+               int mode, loff_t pos)
 {
-       /*
-        * We use punch hole to reclaim the free space used by the
-        * image a.k.a. discard. However we do not support discard if
-        * encryption is enabled, because it may give an attacker
-        * useful information.
-        */
        struct file *file = lo->lo_backing_file;
-       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       struct request_queue *q = lo->lo_queue;
        int ret;
 
-       if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+       if (!blk_queue_discard(q)) {
                ret = -EOPNOTSUPP;
                goto out;
        }
@@ -602,8 +597,13 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
        case REQ_OP_FLUSH:
                return lo_req_flush(lo, rq);
        case REQ_OP_DISCARD:
+               return lo_discard(lo, rq,
+                       FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, pos);
+
        case REQ_OP_WRITE_ZEROES:
-               return lo_discard(lo, rq, pos);
+               return lo_discard(lo, rq,
+                       FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, pos);
+
        case REQ_OP_WRITE:
                if (lo->transfer)
                        return lo_write_transfer(lo, rq, pos);
@@ -857,6 +857,25 @@ static void loop_config_discard(struct loop_device *lo)
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct request_queue *q = lo->lo_queue;
+       struct request_queue *backingq;
+
+       /*
+        * If the backing device is a block device, mirror its discard
+        * capabilities.
+        */
+       if (S_ISBLK(inode->i_mode)) {
+               backingq = bdev_get_queue(inode->i_bdev);
+               blk_queue_max_discard_sectors(q,
+                       backingq->limits.max_discard_sectors);
+
+               blk_queue_max_write_zeroes_sectors(q,
+                       backingq->limits.max_write_zeroes_sectors);
+
+               q->limits.discard_granularity =
+                       backingq->limits.discard_granularity;
+
+               q->limits.discard_alignment =
+                       backingq->limits.discard_alignment;
 
        /*
         * We use punch hole to reclaim the free space used by the
@@ -864,22 +883,24 @@ static void loop_config_discard(struct loop_device *lo)
         * encryption is enabled, because it may give an attacker
         * useful information.
         */
-       if ((!file->f_op->fallocate) ||
-           lo->lo_encrypt_key_size) {
+       } else if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
                q->limits.discard_granularity = 0;
                q->limits.discard_alignment = 0;
                blk_queue_max_discard_sectors(q, 0);
                blk_queue_max_write_zeroes_sectors(q, 0);
-               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
-               return;
-       }
 
-       q->limits.discard_granularity = inode->i_sb->s_blocksize;
-       q->limits.discard_alignment = 0;
+       } else {
+               q->limits.discard_granularity = inode->i_sb->s_blocksize;
+               q->limits.discard_alignment = 0;
+
+               blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+               blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+       }
 
-       blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+       if (q->limits.max_discard_sectors || q->limits.max_write_zeroes_sectors)
+               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
-- 
2.20.1
