[RFC 1/3] block, mm: add support for boosting urgent asynchronous writeback io

2016-08-16 Thread Daeho Jeong
We define an async I/O as an urgent async I/O when a process starts to
wait for its writeback completion; we can easily detect this moment in
wait_on_page_writeback().

To convert an urgent async I/O to a sync I/O, we first need to check in
wait_on_page_writeback() whether the page is under async I/O writeback.
If it is, we have to ask the I/O scheduler to find a request relating
to the page, but we need to wait longer in case the I/O of the page
still stays in the plug list. After finding the async I/O request, we
allocate a new sync I/O request that copies the properties of the async
I/O request, if possible. Otherwise, we just re-insert the async I/O
request with REQ_PRIO set.

Added two page flags as follows:
PG_asyncwb: represents the page is under async I/O writeback
PG_plugged: represents the I/O related to this page stays in the
plug list

Signed-off-by: Daeho Jeong 
---
 block/Kconfig.iosched  |9 
 block/blk-core.c   |   28 ++
 block/elevator.c   |  117 
 include/linux/blk_types.h  |3 ++
 include/linux/elevator.h   |   10 
 include/linux/page-flags.h |   12 +
 include/linux/pagemap.h|   12 +
 include/trace/events/mmflags.h |   10 +++-
 mm/filemap.c   |   39 ++
 9 files changed, 239 insertions(+), 1 deletion(-)

diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 421bef9..c21ae30 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -39,6 +39,15 @@ config CFQ_GROUP_IOSCHED
---help---
  Enable group IO scheduling in CFQ.
 
+config BOOST_URGENT_ASYNC_WB
+   bool "Enable boosting urgent asynchronous writeback (EXPERIMENTAL)"
+   default n
+   ---help---
+ Enabling this option allows the I/O scheduler to convert urgent
+ asynchronous I/Os, which are flushed by kworker but whose completion
+ is still being awaited by another process, into synchronous I/Os for
+ better responsiveness.
+
 choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
diff --git a/block/blk-core.c b/block/blk-core.c
index 2475b1c7..f8ce24a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1694,6 +1694,23 @@ out:
return ret;
 }
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+void clear_plugged_flag_in_bio(struct bio *bio)
+{
+   if (bio_flagged(bio, BIO_ASYNC_WB)) {
+   struct bio_vec bv;
+   struct bvec_iter iter;
+
+   bio_for_each_segment(bv, bio, iter) {
+   if (TestClearPagePlugged(bv.bv_page)) {
+   smp_mb__after_atomic();
+   wake_up_page(bv.bv_page, PG_plugged);
+   }
+   }
+   }
+}
+#endif
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
req->cmd_type = REQ_TYPE_FS;
@@ -1702,6 +1719,11 @@ void init_request_from_bio(struct request *req, struct 
bio *bio)
if (bio->bi_rw & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   if (bio_flagged(bio, BIO_ASYNC_WB))
+   req->cmd_flags |= REQ_ASYNC_WB;
+#endif
+
req->errors = 0;
req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
@@ -1752,6 +1774,9 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, 
struct bio *bio)
el_ret = elv_merge(q, &req, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, req, bio)) {
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   clear_plugged_flag_in_bio(bio);
+#endif
elv_bio_merged(q, req, bio);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
@@ -1759,6 +1784,9 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, 
struct bio *bio)
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
if (bio_attempt_front_merge(q, req, bio)) {
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   clear_plugged_flag_in_bio(bio);
+#endif
elv_bio_merged(q, req, bio);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
diff --git a/block/elevator.c b/block/elevator.c
index c3555c9..e4081ce 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -598,6 +598,20 @@ void __elv_add_request(struct request_queue *q, struct 
request *rq, int where)
 
rq->q = q;
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   if (rq->cmd_flags & REQ_ASYNC_WB) {
+   struct req_iterator iter;
+   struct bio_vec bvec;
+
+   rq_for_each_segment(bvec, rq, iter) {
+   if (TestClearPagePlugged(bvec.bv_page)) {
+   smp_mb__after_atomic();
+ 

[RFC 2/3] cfq: add cfq_find_async_wb_req

2016-08-16 Thread Daeho Jeong
Implement a function that finds an asynchronous writeback I/O at a
specified sector number, removes the found I/O from the queue, and
returns it to the caller.

Signed-off-by: Daeho Jeong 
---
 block/cfq-iosched.c  |   29 +
 block/elevator.c |   24 
 include/linux/elevator.h |3 +++
 3 files changed, 56 insertions(+)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4a34978..69355e2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2524,6 +2524,32 @@ static void cfq_remove_request(struct request *rq)
}
 }
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+static struct request *
+cfq_find_async_wb_req(struct request_queue *q, sector_t sector)
+{
+   struct cfq_data *cfqd = q->elevator->elevator_data;
+   struct cfq_queue *cfqq;
+   struct request *found_req = NULL;
+   int i;
+
+   for (i = 0; i < IOPRIO_BE_NR; i++) {
+   cfqq = cfqd->root_group->async_cfqq[1][i];
+   if (cfqq) {
+   if (cfqq->queued[0])
+   found_req = elv_rb_find_incl(&cfqq->sort_list,
+ sector);
+   if (found_req) {
+   cfq_remove_request(found_req);
+   return found_req;
+   }
+   }
+   }
+
+   return NULL;
+}
+#endif
+
 static int cfq_merge(struct request_queue *q, struct request **req,
 struct bio *bio)
 {
@@ -4735,6 +4761,9 @@ static struct elevator_type iosched_cfq = {
.elevator_add_req_fn =  cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
.elevator_deactivate_req_fn =   cfq_deactivate_request,
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   .elevator_find_async_wb_req_fn = cfq_find_async_wb_req,
+#endif
.elevator_completed_req_fn =cfq_completed_request,
.elevator_former_req_fn =   elv_rb_former_request,
.elevator_latter_req_fn =   elv_rb_latter_request,
diff --git a/block/elevator.c b/block/elevator.c
index e4081ce..d34267a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -343,6 +343,30 @@ struct request *elv_rb_find(struct rb_root *root, sector_t 
sector)
 }
 EXPORT_SYMBOL(elv_rb_find);
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+struct request *elv_rb_find_incl(struct rb_root *root, sector_t sector)
+{
+   struct rb_node *n = root->rb_node;
+   struct request *rq;
+
+   while (n) {
+   rq = rb_entry(n, struct request, rb_node);
+
+   if (sector < blk_rq_pos(rq))
+   n = n->rb_left;
+   else if (sector > blk_rq_pos(rq)) {
+   if (sector < blk_rq_pos(rq) + blk_rq_sectors(rq))
+   return rq;
+   n = n->rb_right;
+   } else
+   return rq;
+   }
+
+   return NULL;
+}
+EXPORT_SYMBOL(elv_rb_find_incl);
+#endif
+
 /*
  * Insert rq into dispatch queue of q.  Queue lock must be held on
  * entry.  rq is sort instead into the dispatch queue. To be used by
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 08ce155..efc202a 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -183,6 +183,9 @@ extern struct request *elv_rb_latter_request(struct 
request_queue *, struct requ
 extern void elv_rb_add(struct rb_root *, struct request *);
 extern void elv_rb_del(struct rb_root *, struct request *);
 extern struct request *elv_rb_find(struct rb_root *, sector_t);
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+extern struct request *elv_rb_find_incl(struct rb_root *, sector_t);
+#endif
 
 /*
  * Return values from elevator merger
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC 3/3] ext4: tag asynchronous writeback io

2016-08-16 Thread Daeho Jeong
Set the page with PG_asyncwb and PG_plugged, and set the bio with
BIO_ASYNC_WB when submitting asynchronous writeback I/O in order to
mark which pages are flushed as asynchronous writeback I/O and which
one stays in the plug list.

Signed-off-by: Daeho Jeong 
---
 fs/ext4/page-io.c |   11 +++
 1 file changed, 11 insertions(+)

diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 2a01df9..5912e59 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -370,6 +370,10 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   if (io->io_wbc->sync_mode == WB_SYNC_NONE)
+   bio->bi_flags |= (1 << BIO_ASYNC_WB);
+#endif
return 0;
 }
 
@@ -416,6 +420,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
 
+#ifdef CONFIG_BOOST_URGENT_ASYNC_WB
+   if (wbc->sync_mode == WB_SYNC_NONE) {
+   SetPagePlugged(page);
+   SetPageAsyncWB(page);
+   }
+#endif
+
if (keep_towrite)
set_page_writeback_keepwrite(page);
else
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC 0/3] Add the feature of boosting urgent asynchronous writeback I/O

2016-08-16 Thread Daeho Jeong
This is the draft version of the feature to boost urgent async
writeback I/O and this is developed based on kernel 4.7.

We can experience an unexpected delay when we execute fsync() in a
situation where tons of async I/O are being flushed out in the system,
and, because of this kind of fsync() delay, mobile users can frequently
see application hiccups.

To finish the fsync() operation, fsync() normally flushes out the
previous buffered data of the file as sync I/O, however, if there are
too many dirty pages in the page cache, the buffered data can be
flushed out as async I/O with other dirty pages by kworker before
fsync() directly flushes it out, and fsync() might wait until all the
asynchronously issued I/Os are done.

To minimize this kind of delay, we convert async I/Os whose completion
is being awaited by other processes into sync I/Os for better
responsiveness.

We made two micro benchmarks using fsync() and evaluated the effect of
this feature on the mobile device having four 2.3GHz Exynos M1 ARM
cores and four 1.6GHz Cortex-A53 ARM cores, 4GB RAM and 32GB UFS
storage.

The first benchmark iterates the following 100 times, with intensive
background I/O: a 4KB data write(), holding on for 100ms to give
kworker more chances to flush the buffered data, and then fsync().
(Its total execution time is 1.06s without the background I/O.)

   =>  
fsync exec. time(sec.)  0.2894890.031048
0.2826810.031255
0.2903740.034004
0.2353800.026512
(...)   (...)
0.2304880.044029
0.3370350.054402
0.3775750.025746
Total exec. time(sec.)  21.78   3.24 (85.1% decreased)

The second one iterates an 8MB data write() followed by fsync(), 50
times, with intensive background I/O.
(Its total execution time is 5.23s without the background I/O.)

   =>  
fsync exec. time(sec.)  0.2583740.125503
0.3112170.127392
0.2555430.117327
0.2378110.154037
(...)   (...)
0.2050520.131991
0.2064690.107791
0.2636190.155979
Total exec. time(sec.)  14.61   11.28 (22.8% decreased)

Daeho Jeong (3):
  block, mm: add support for boosting urgent asynchronous writeback io
  cfq: add cfq_find_async_wb_req
  ext4: tag asynchronous writeback io

 block/Kconfig.iosched  |9 +++
 block/blk-core.c   |   28 
 block/cfq-iosched.c|   29 +
 block/elevator.c   |  141 
 fs/ext4/page-io.c  |   11 
 include/linux/blk_types.h  |3 +
 include/linux/elevator.h   |   13 
 include/linux/page-flags.h |   12 
 include/linux/pagemap.h|   12 
 include/trace/events/mmflags.h |   10 ++-
 mm/filemap.c   |   39 +++
 11 files changed, 306 insertions(+), 1 deletion(-)

-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] block: Fix race triggered by blk_set_queue_dying()

2016-08-16 Thread Jens Axboe

On 08/16/2016 05:48 PM, Bart Van Assche wrote:

blk_set_queue_dying() can be called while another thread is
submitting I/O or changing queue flags, e.g. through dm_stop_queue().
Hence protect the QUEUE_FLAG_DYING flag change with locking.


Added, thanks.

--
Jens Axboe
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] block: Fix secure erase

2016-08-16 Thread Christoph Hellwig
On Tue, Aug 16, 2016 at 10:20:25AM +0300, Adrian Hunter wrote:
> On 15/08/16 21:14, Christoph Hellwig wrote:
> > On Mon, Aug 15, 2016 at 11:43:12AM -0500, Shaun Tancheff wrote:
> >> Hmm ... Since REQ_SECURE implied REQ_DISCARD doesn't this
> >> mean that we should include REQ_OP_SECURE_ERASE checking
> >> wherever REQ_OP_DISCARD is being checked now in drivers/scsi/sd.c ?
> >>
> >> (It's only in 3 spots so it's a quickie patch)
> > 
> > SCSI doesn't support secure erase operations.  Only MMC really
> > supports it, plus the usual cargo culting in Xen blkfront that's
> > probably never been tested..
> > 
> 
> I left SCSI out because support does not exist at the moment.
> However there is UFS which is seen as the replacement for eMMC.
> And there is a patch to add support for BLKSECDISCARD:
> 
>   http://marc.info/?l=linux-scsi&m=146953519016056
> 
> So SCSI will need updating if that is to go in.

That patch is complete crap and if anyone thinks they'd get shit like
that in they are on the same crack that apparently the authors of the
UFS spec are on.

If you want secure discard supported in UFS, get a command for it into
SBC instead of bypassing the command set.
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] block: Fix race triggered by blk_set_queue_dying()

2016-08-16 Thread Bart Van Assche
blk_set_queue_dying() can be called while another thread is
submitting I/O or changing queue flags, e.g. through dm_stop_queue().
Hence protect the QUEUE_FLAG_DYING flag change with locking.

Signed-off-by: Bart Van Assche 
Cc: Christoph Hellwig 
Cc: Mike Snitzer 
Cc: stable 
---
 block/blk-core.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index e0bc563..96d5835 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-   queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+   spin_lock_irq(q->queue_lock);
+   queue_flag_set(QUEUE_FLAG_DYING, q);
+   spin_unlock_irq(q->queue_lock);
 
if (q->mq_ops)
blk_mq_wake_waiters(q);
-- 
2.9.2

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH V2] block: Fix secure erase

2016-08-16 Thread Christoph Hellwig
Looks fine,

Reviewed-by: Christoph Hellwig 
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [BUG] Deadlock in blk_mq_register_disk error path

2016-08-16 Thread Jinpu Wang
On Mon, Aug 15, 2016 at 6:22 PM, Bart Van Assche
 wrote:
> On 08/15/2016 09:01 AM, Jinpu Wang wrote:
>>
>> It's more likely you hit another bug, my colleague Roman fix that:
>>
>> http://www.spinics.net/lists/linux-block/msg04552.html
>
>
> Hello Jinpu,
>
> Interesting. However, I see that you wrote the following: "Firstly this wrong
> sequence raises two kernel warnings: 1st. WARNING at
> lib/percpu-refcount.c:309 percpu_ref_kill_and_confirm called more than once
> 2nd. WARNING at lib/percpu-refcount.c:331". I haven't seen any of these
> kernel warnings ...
>
> Thanks,
>
> Bart.
>

The warning happened from time to time, but your hung tasks are
similar with ours.
We injected some delay in order to reproduce easily.


-- 
Mit freundlichen Grüßen,
Best Regards,

Jack Wang

Linux Kernel Developer Storage
ProfitBricks GmbH  The IaaS-Company.

ProfitBricks GmbH
Greifswalder Str. 207
D - 10405 Berlin
Tel: +49 30 5770083-42
Fax: +49 30 5770085-98
Email: jinpu.w...@profitbricks.com
URL: http://www.profitbricks.de

Sitz der Gesellschaft: Berlin.
Registergericht: Amtsgericht Charlottenburg, HRB 125506 B.
Geschäftsführer: Andreas Gauger, Achim Weiss.
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH V2] block: Fix secure erase

2016-08-16 Thread Adrian Hunter
Commit 288dab8a35a0 ("block: add a separate operation type for secure
erase") split REQ_OP_SECURE_ERASE from REQ_OP_DISCARD without considering
all the places REQ_OP_DISCARD was being used to mean either. Fix those.

Signed-off-by: Adrian Hunter 
Fixes: 288dab8a35a0 ("block: add a separate operation type for secure erase")
---


Changes in V2:
In elv_dispatch_sort() don't allow requests with different ops to pass
one another.


 block/bio.c  | 21 +++--
 block/blk-merge.c| 33 +++--
 block/elevator.c |  2 +-
 drivers/mmc/card/block.c |  1 +
 drivers/mmc/card/queue.c |  3 ++-
 drivers/mmc/card/queue.h |  4 +++-
 include/linux/bio.h  | 10 --
 include/linux/blkdev.h   |  6 --
 kernel/trace/blktrace.c  |  2 +-
 9 files changed, 50 insertions(+), 32 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index f39477538fef..aa7354088008 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t 
gfp_mask,
bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size= bio_src->bi_iter.bi_size;
 
-   if (bio_op(bio) == REQ_OP_DISCARD)
-   goto integrity_clone;
-
-   if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+   switch (bio_op(bio)) {
+   case REQ_OP_DISCARD:
+   case REQ_OP_SECURE_ERASE:
+   break;
+   case REQ_OP_WRITE_SAME:
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-   goto integrity_clone;
+   break;
+   default:
+   bio_for_each_segment(bv, bio_src, iter)
+   bio->bi_io_vec[bio->bi_vcnt++] = bv;
+   break;
}
 
-   bio_for_each_segment(bv, bio_src, iter)
-   bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
 
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 * Discards need a mutable bio_vec to accommodate the payload
 * required by the DSM TRIM and UNMAP commands.
 */
-   if (bio_op(bio) == REQ_OP_DISCARD)
+   if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3eec75a9e91d..72627e3cf91e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -172,12 +172,18 @@ void blk_queue_split(struct request_queue *q, struct bio 
**bio,
struct bio *split, *res;
unsigned nsegs;
 
-   if (bio_op(*bio) == REQ_OP_DISCARD)
+   switch (bio_op(*bio)) {
+   case REQ_OP_DISCARD:
+   case REQ_OP_SECURE_ERASE:
split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-   else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+   break;
+   case REQ_OP_WRITE_SAME:
split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-   else
+   break;
+   default:
split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+   break;
+   }
 
/* physical segments can be figured out during splitting */
res = split ? split : *bio;
@@ -213,7 +219,7 @@ static unsigned int __blk_recalc_rq_segments(struct 
request_queue *q,
 * This should probably be returning 0, but blk_add_request_payload()
 * (Christoph)
 */
-   if (bio_op(bio) == REQ_OP_DISCARD)
+   if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
return 1;
 
if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +391,9 @@ static int __blk_bios_map_sg(struct request_queue *q, 
struct bio *bio,
nsegs = 0;
cluster = blk_queue_cluster(q);
 
-   if (bio_op(bio) == REQ_OP_DISCARD) {
+   switch (bio_op(bio)) {
+   case REQ_OP_DISCARD:
+   case REQ_OP_SECURE_ERASE:
/*
 * This is a hack - drivers should be neither modifying the
 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +401,16 @@ static int __blk_bios_map_sg(struct request_queue *q, 
struct bio *bio,
 * a payload we need to set up here (thank you Christoph) and
 * bi_vcnt is really the only way of telling if we need to.
 */
-
-   if (bio->bi_vcnt)
-   goto single_segment;
-
-   return 0;
-   }
-
-   if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+   if (!bio->bi_vcnt)
+   return 0;
+   /* Fall through */
+   case REQ_OP_WRITE_SAME:
*sg = sglist;
bvec = bio_iovec(bio);
sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
return 1;
+   def

Re: [PATCH] block: Fix secure erase

2016-08-16 Thread Adrian Hunter
On 15/08/16 21:14, Christoph Hellwig wrote:
> On Mon, Aug 15, 2016 at 11:43:12AM -0500, Shaun Tancheff wrote:
>> Hmm ... Since REQ_SECURE implied REQ_DISCARD doesn't this
>> mean that we should include REQ_OP_SECURE_ERASE checking
>> wherever REQ_OP_DISCARD is being checked now in drivers/scsi/sd.c ?
>>
>> (It's only in 3 spots so it's a quickie patch)
> 
> SCSI doesn't support secure erase operations.  Only MMC really
> supports it, plus the usual cargo culting in Xen blkfront that's
> probably never been tested..
> 

I left SCSI out because support does not exist at the moment.
However there is UFS which is seen as the replacement for eMMC.
And there is a patch to add support for BLKSECDISCARD:

http://marc.info/?l=linux-scsi&m=146953519016056

So SCSI will need updating if that is to go in.

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html