On Mon, Aug 10, 2020 at 09:55:17AM -0600, Jens Axboe wrote:
> On 8/10/20 9:36 AM, syzbot wrote:
> > Hello,
> > 
> > syzbot found the following issue on:
> > 
> > HEAD commit:    449dc8c9 Merge tag 'for-v5.9' of git://git.kernel.org/pub/..
> > git tree:       upstream
> > console output: https://syzkaller.appspot.com/x/log.txt?x=14d41e02900000
> > kernel config:  https://syzkaller.appspot.com/x/.config?x=9d25235bf0162fbc
> > dashboard link: https://syzkaller.appspot.com/bug?extid=996f91b6ec3812c48042
> > compiler:       clang version 10.0.0 (https://github.com/llvm/llvm-project/ c2443155a0fb245c8f17f2c1c72b6ea391e86e81)
> > syz repro:      https://syzkaller.appspot.com/x/repro.syz?x=133c9006900000
> > C reproducer:   https://syzkaller.appspot.com/x/repro.c?x=1191cb1a900000
> > 
> > IMPORTANT: if you fix the issue, please add the following tag to the commit:
> > Reported-by: [email protected]
> 
> Thanks, the below should fix this one.

Yeah, it seems right to me, since __io_queue_deferred() (invoked by
io_commit_cqring()) is the only one of these paths that can be called
with 'completion_lock' already held.
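
IIUC the problematic chain was io_commit_cqring() -> __io_queue_deferred()
-> __io_queue_async_work() -> io_queue_linked_timeout(), which then tried
to take 'completion_lock' again. The patch avoids that by splitting
io_queue_linked_timeout() into a lock-taking wrapper and a __ variant that
expects the lock to be already held. Just to illustrate the pattern, here
is a self-contained userspace sketch (made-up names, pthreads instead of
the kernel spinlock, not the actual io_uring code):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* __ variant: the caller must already hold 'lock' */
  static void __queue_timeout(int id)
  {
          printf("arming timeout %d (lock held by caller)\n", id);
  }

  /* plain variant: takes and releases 'lock' itself */
  static void queue_timeout(int id)
  {
          pthread_mutex_lock(&lock);
          __queue_timeout(id);
          pthread_mutex_unlock(&lock);
  }

  /*
   * A path that already holds 'lock' (like __io_queue_deferred() with
   * 'completion_lock') must use the __ variant, otherwise it would try
   * to take the same lock twice.
   */
  static void queue_deferred(void)
  {
          pthread_mutex_lock(&lock);
          __queue_timeout(1);             /* NOT queue_timeout(1) */
          pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
          queue_deferred();
          queue_timeout(2);
          return 0;
  }

IIUC the deferred path also marks the link with REQ_F_COMP_LOCKED before
the final io_put_req(), so that dropping the submission reference doesn't
try to take 'completion_lock' again either.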

Just out of curiosity: while exploring the code I noticed that we always
call io_commit_cqring() with the 'completion_lock' held, except in the
io_poll_* functions.

Is that because no concurrency is possible there?
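
(For reference, the pattern I'm thinking of is roughly the following,
simplified from the CQE posting paths and not meant to be verbatim:)

          spin_lock_irqsave(&ctx->completion_lock, flags);
          /* fill the CQE(s) for the completed request(s) */
          io_commit_cqring(ctx);
          spin_unlock_irqrestore(&ctx->completion_lock, flags);

          io_cqring_ev_posted(ctx);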

Thanks,
Stefano

> 
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 443eecdfeda9..f9be665d1c5e 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -898,6 +898,7 @@ static void io_put_req(struct io_kiocb *req);
>  static void io_double_put_req(struct io_kiocb *req);
>  static void __io_double_put_req(struct io_kiocb *req);
>  static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
> +static void __io_queue_linked_timeout(struct io_kiocb *req);
>  static void io_queue_linked_timeout(struct io_kiocb *req);
>  static int __io_sqe_files_update(struct io_ring_ctx *ctx,
>                                struct io_uring_files_update *ip,
> @@ -1179,7 +1180,7 @@ static void io_prep_async_link(struct io_kiocb *req)
>                       io_prep_async_work(cur);
>  }
>  
> -static void __io_queue_async_work(struct io_kiocb *req)
> +static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
>  {
>       struct io_ring_ctx *ctx = req->ctx;
>       struct io_kiocb *link = io_prep_linked_timeout(req);
> @@ -1187,16 +1188,19 @@ static void __io_queue_async_work(struct io_kiocb *req)
>       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
>                                       &req->work, req->flags);
>       io_wq_enqueue(ctx->io_wq, &req->work);
> -
> -     if (link)
> -             io_queue_linked_timeout(link);
> +     return link;
>  }
>  
>  static void io_queue_async_work(struct io_kiocb *req)
>  {
> +     struct io_kiocb *link;
> +
>       /* init ->work of the whole link before punting */
>       io_prep_async_link(req);
> -     __io_queue_async_work(req);
> +     link = __io_queue_async_work(req);
> +
> +     if (link)
> +             io_queue_linked_timeout(link);
>  }
>  
>  static void io_kill_timeout(struct io_kiocb *req)
> @@ -1229,12 +1233,19 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
>       do {
>               struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
>                                               struct io_defer_entry, list);
> +             struct io_kiocb *link;
>  
>               if (req_need_defer(de->req, de->seq))
>                       break;
>               list_del_init(&de->list);
>               /* punt-init is done before queueing for defer */
> -             __io_queue_async_work(de->req);
> +             link = __io_queue_async_work(de->req);
> +             if (link) {
> +                     __io_queue_linked_timeout(link);
> +                     /* drop submission reference */
> +                     link->flags |= REQ_F_COMP_LOCKED;
> +                     io_put_req(link);
> +             }
>               kfree(de);
>       } while (!list_empty(&ctx->defer_list));
>  }
> @@ -5945,15 +5956,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
>       return HRTIMER_NORESTART;
>  }
>  
> -static void io_queue_linked_timeout(struct io_kiocb *req)
> +static void __io_queue_linked_timeout(struct io_kiocb *req)
>  {
> -     struct io_ring_ctx *ctx = req->ctx;
> -
>       /*
>        * If the list is now empty, then our linked request finished before
>        * we got a chance to setup the timer
>        */
> -     spin_lock_irq(&ctx->completion_lock);
>       if (!list_empty(&req->link_list)) {
>               struct io_timeout_data *data = &req->io->timeout;
>  
> @@ -5961,6 +5969,14 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
>               hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
>                               data->mode);
>       }
> +}
> +
> +static void io_queue_linked_timeout(struct io_kiocb *req)
> +{
> +     struct io_ring_ctx *ctx = req->ctx;
> +
> +     spin_lock_irq(&ctx->completion_lock);
> +     __io_queue_linked_timeout(req);
>       spin_unlock_irq(&ctx->completion_lock);
>  
>       /* drop submission reference */
> 
> -- 
> Jens Axboe
> 
