Re: [PATCH 5/9] nowait aio: return on congested block device

2017-04-12 Thread Goldwyn Rodrigues


On 04/12/2017 03:36 AM, Christoph Hellwig wrote:
> As mentioned last time around, this should be a REQ_NOWAIT flag so
> that it can be easily passed down to the request layer.
> 
>> +static inline void bio_wouldblock_error(struct bio *bio)
>> +{
>> +	bio->bi_error = -EAGAIN;
>> +	bio_endio(bio);
>> +}
> 
> Please skip this helper..

Why? It is being called three times.
I am incorporating all the rest of the comments, besides this one. Thanks.

> 
>> +#define QUEUE_FLAG_NOWAIT	28	/* queue supports BIO_NOWAIT */
> 
> Please make the flag name a little more descriptive, this sounds like
> it will never wait.
> 

-- 
Goldwyn


Re: [PATCH 5/9] nowait aio: return on congested block device

2017-04-12 Thread Christoph Hellwig
As mentioned last time around, this should be a REQ_NOWAIT flag so
that it can be easily passed down to the request layer.

> +static inline void bio_wouldblock_error(struct bio *bio)
> +{
> +	bio->bi_error = -EAGAIN;
> +	bio_endio(bio);
> +}

Please skip this helper..

> +#define QUEUE_FLAG_NOWAIT	28	/* queue supports BIO_NOWAIT */

Please make the flag name a little more descriptive, this sounds like
it will never wait.
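
For concreteness, a rough sketch of the suggested direction (REQ_NOWAIT
does not exist at this point; the flag name and the plumbing below are
assumptions based on this comment, not code from the series):

	/* hypothetical: set alongside the other bi_opf bits at submission */
	bio->bi_opf |= REQ_NOWAIT;

	/* hypothetical: tested wherever the series currently checks
	 * bio_flagged(bio, BIO_NOWAIT), e.g. in get_request():
	 */
	if (bio && (bio->bi_opf & REQ_NOWAIT)) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

Since bi_opf is carried into the request's cmd_flags when a request is
formed, the hint would reach the request layer without per-bio plumbing.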


[PATCH 5/9] nowait aio: return on congested block device

2017-04-11 Thread Goldwyn Rodrigues
From: Goldwyn Rodrigues 

A new flag BIO_NOWAIT is introduced to identify bios originating
from an iocb with IOCB_NOWAIT set. The flag tells the block layer to
fail the bio with -EAGAIN immediately if a request cannot be made,
instead of blocking until one can.

To facilitate this, QUEUE_FLAG_NOWAIT is set on the request queues of
devices which support this behaviour. Currently only virtio_blk and sd
set it; support for more devices will be added later.
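
To illustrate the intended plumbing (a minimal sketch, not a hunk from
this patch; the real wiring is the fs/direct-io.c change in the diffstat,
and the exact call site is simplified here), a submitter would do roughly:

	/* propagate the iocb's nowait hint to each bio it issues,
	 * using the existing bio_set_flag() helper
	 */
	if (iocb->ki_flags & IOCB_NOWAIT)
		bio_set_flag(bio, BIO_NOWAIT);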

Signed-off-by: Goldwyn Rodrigues 
---
 block/blk-core.c           | 24 ++++++++++++++++++++++--
 block/blk-mq-sched.c       |  3 +++
 block/blk-mq.c             |  4 ++++
 drivers/block/virtio_blk.c |  3 +++
 drivers/scsi/sd.c          |  3 +++
 fs/direct-io.c             | 11 +++++++++--
 include/linux/bio.h        |  6 ++++++
 include/linux/blk_types.h  |  1 +
 include/linux/blkdev.h     |  2 ++
 9 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index d772c221cc17..95a9b18f38a3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1232,6 +1232,11 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	if (!IS_ERR(rq))
 		return rq;
 
+	if (bio && bio_flagged(bio, BIO_NOWAIT)) {
+		blk_put_rl(rl);
+		return ERR_PTR(-EAGAIN);
+	}
+
 	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return rq;
@@ -1870,6 +1875,18 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	if (bio_flagged(bio, BIO_NOWAIT)) {
+		if (!blk_queue_nowait(q)) {
+			err = -EOPNOTSUPP;
+			goto end_io;
+		}
+		if (!(bio->bi_opf & (REQ_SYNC | REQ_IDLE))) {
+			err = -EINVAL;
+			goto end_io;
+		}
+	}
+
+
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
@@ -2021,7 +2038,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		if (likely(blk_queue_enter(q, false) == 0)) {
+		if (likely(blk_queue_enter(q, bio_flagged(bio, BIO_NOWAIT)) == 0)) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -2046,7 +2063,10 @@ blk_qc_t generic_make_request(struct bio *bio)
 			bio_list_merge(&bio_list_on_stack[0], &same);
 			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
-			bio_io_error(bio);
+			if (unlikely(!blk_queue_dying(q) && bio_flagged(bio, BIO_NOWAIT)))
+				bio_wouldblock_error(bio);
+			else
+				bio_io_error(bio);
 		}
 		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c974a1bbf4cb..c0d3bbf293ec 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -119,6 +119,9 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 
+	if (likely(bio) && bio_flagged(bio, BIO_NOWAIT))
+		data->flags |= BLK_MQ_REQ_NOWAIT;
+
 	if (e) {
 		data->flags |= BLK_MQ_REQ_INTERNAL;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 572966f49596..f20e802b0e15 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1538,6 +1538,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
+		if (bio && bio_flagged(bio, BIO_NOWAIT))
+			bio_wouldblock_error(bio);
 		return BLK_QC_T_NONE;
 	}
 
@@ -1662,6 +1664,8 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
+		if (bio && bio_flagged(bio, BIO_NOWAIT))
+			bio_wouldblock_error(bio);
 		return BLK_QC_T_NONE;
 	}
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1d4c9f8bc1e1..7481124c5025 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -731,6 +731,9 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* No real sector limit. */
 	blk_queue_max_hw_sectors(q, -1U);
 
+	/* Request queue supports BIO_NOWAIT */
+	queue_flag_set_unlocked(QUEUE_FLAG_NOWAIT, q);
+
 	/*