On 10/20/19 6:23 PM, Jackie Liu wrote:
> 
> 
>> On Oct 17, 2019, at 23:24, Jens Axboe <[email protected]> wrote:
>>
>> We've got two issues with the non-regular file handling for non-blocking
>> IO:
>>
>> 1) We don't want to re-do a short read in full for a non-regular file,
>>    as we can't just read the data again.
>> 2) For non-regular files that don't support non-blocking IO attempts,
>>    we need to punt to async context even if the file is opened as
>>    non-blocking. Otherwise the caller always gets -EAGAIN.
>>
>> Add two new request flags to handle these cases. One is just a cache
>> of the inode S_ISREG() status, the other tells io_uring that we always
>> need to punt this request to async context, even if REQ_F_NOWAIT is set.
>>
>> Cc: [email protected]
>> Reported-by: Hrvoje Zeba <[email protected]>
>> Signed-off-by: Jens Axboe <[email protected]>
>> ---
>> fs/io_uring.c | 55 +++++++++++++++++++++++++++++++++++----------------
>> 1 file changed, 38 insertions(+), 17 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index d2cb277da2f4..a4ee5436cb61 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -322,6 +322,8 @@ struct io_kiocb {
>> #define REQ_F_FAIL_LINK              256     /* fail rest of links */
>> #define REQ_F_SHADOW_DRAIN   512     /* link-drain shadow req */
>> #define REQ_F_TIMEOUT                1024    /* timeout request */
>> +#define REQ_F_ISREG         2048    /* regular file */
>> +#define REQ_F_MUST_PUNT             4096    /* must be punted even for NONBLOCK */
>>      u64                     user_data;
>>      u32                     result;
>>      u32                     sequence;
>> @@ -914,26 +916,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
>>      return ret;
>> }
>>
>> -static void kiocb_end_write(struct kiocb *kiocb)
>> +static void kiocb_end_write(struct io_kiocb *req)
>> {
>> -    if (kiocb->ki_flags & IOCB_WRITE) {
>> -            struct inode *inode = file_inode(kiocb->ki_filp);
>> +    /*
>> +     * Tell lockdep we inherited freeze protection from submission
>> +     * thread.
>> +     */
>> +    if (req->flags & REQ_F_ISREG) {
>> +            struct inode *inode = file_inode(req->file);
>>
>> -            /*
>> -             * Tell lockdep we inherited freeze protection from submission
>> -             * thread.
>> -             */
>> -            if (S_ISREG(inode->i_mode))
>> -                    __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
>> -            file_end_write(kiocb->ki_filp);
>> +            __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
>>      }
>> +    file_end_write(req->file);
>> }
>>
>> static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
>> {
>>      struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
>>
>> -    kiocb_end_write(kiocb);
>> +    if (kiocb->ki_flags & IOCB_WRITE)
>> +            kiocb_end_write(req);
>>
>>      if ((req->flags & REQ_F_LINK) && res != req->result)
>>              req->flags |= REQ_F_FAIL_LINK;
>> @@ -945,7 +947,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
>> {
>>      struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
>>
>> -    kiocb_end_write(kiocb);
>> +    if (kiocb->ki_flags & IOCB_WRITE)
>> +            kiocb_end_write(req);
>>
>>      if ((req->flags & REQ_F_LINK) && res != req->result)
>>              req->flags |= REQ_F_FAIL_LINK;
>> @@ -1059,8 +1062,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
>>      if (!req->file)
>>              return -EBADF;
>>
>> -    if (force_nonblock && !io_file_supports_async(req->file))
>> +    if (S_ISREG(file_inode(req->file)->i_mode))
>> +            req->flags |= REQ_F_ISREG;
>> +
>> +    /*
>> +     * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
>> +     * we know to async punt it even if it was opened O_NONBLOCK
>> +     */
>> +    if (force_nonblock && !io_file_supports_async(req->file)) {
>>              force_nonblock = false;
>> +            req->flags |= REQ_F_MUST_PUNT;
>> +    }
> 
> Hello Jens, here is your new version:
> 
> +     /*
> +      * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
> +      * we know to async punt it even if it was opened O_NONBLOCK
> +      */
> +     if (force_nonblock && !io_file_supports_async(req->file)) {
> +             req->flags |= REQ_F_MUST_PUNT;
> +             return -EAGAIN;
> +     }
> 
> So, if req->file doesn't support async, we always return -EAGAIN immediately.

Right
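
For completeness, the submission side then consumes that -EAGAIN roughly
like this (untested sketch, based on the current __io_submit_sqe call
site; the exact spot may differ in the final patch):

	ret = __io_submit_sqe(ctx, req, s, true);

	/*
	 * Async punt the request if we got -EAGAIN and either the
	 * submitter did not ask for NOWAIT semantics, or the file
	 * can't do nonblocking attempts at all (REQ_F_MUST_PUNT).
	 */
	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
	    (req->flags & REQ_F_MUST_PUNT))) {
		/* hand off to the io_uring workqueue instead of failing */
		...
	}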

>>      kiocb->ki_pos = READ_ONCE(sqe->off);
>>      kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
>> @@ -1081,7 +1093,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
>>              return ret;
>>
>>      /* don't allow async punt if RWF_NOWAIT was requested */
>> -    if (kiocb->ki_flags & IOCB_NOWAIT)
>> +    if ((kiocb->ki_flags & IOCB_NOWAIT) ||
>> +        (req->file->f_flags & O_NONBLOCK))
> 
> I think if we return -EAGAIN immediately and use the work queue to execute
> this request, then this check is unnecessary.
> 
>>              req->flags |= REQ_F_NOWAIT;
>>
>>      if (force_nonblock)
>> @@ -1382,7 +1395,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
>>               * need async punt anyway, so it's more efficient to do it
>>               * here.
>>               */
>> -            if (force_nonblock && ret2 > 0 && ret2 < read_size)
>> +            if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
>> +                (req->flags & REQ_F_ISREG) &&
>> +                ret2 > 0 && ret2 < read_size)
>>                      ret2 = -EAGAIN;
> 
> This is also unnecessary because force_nonblock is always false.

I totally agree; with the above direct return change, these aren't
strictly necessary. But I kept them to be on the safe side.
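
For reference, with the direct return plus the defensive bits kept, the
prep path ends up looking roughly like this (sketch assembled from the
hunks above, not the final patch):

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT
	 * so we know to async punt it even if it was opened O_NONBLOCK.
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	...

	/* don't allow async punt if RWF_NOWAIT or O_NONBLOCK was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;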

-- 
Jens Axboe
