From: Pavel Begunkov <asml.sile...@gmail.com>

[ Upstream commit c98de08c990e190fc7cc3aaf8079b4a0674c6425 ]

As tasks now cancel only their own requests, and inflight_wait is awaited
only in io_uring_cancel_files(), which should be called with ->in_idle
set, use tctx->wait instead of keeping a separate inflight_wait.

That will add some spurious wakeups but is actually safer from the
standpoint of not hanging the task.

e.g.
task1                   | IRQ
                        | *start* io_complete_rw_common(link)
                        |        link: req1 -> req2 -> req3(with files)
*cancel_files()         |
io_wq_cancel(), etc.    |
                        | put_req(link), adds to io-wq req2
schedule()              |

So, task1 will never try to cancel req2 or req3. If req2 is
long-standing (e.g. read(empty_pipe)), this may hang.

Signed-off-by: Pavel Begunkov <asml.sile...@gmail.com>
Signed-off-by: Jens Axboe <ax...@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 fs/io_uring.c |   13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -286,7 +286,6 @@ struct io_ring_ctx {
                struct list_head        timeout_list;
                struct list_head        cq_overflow_list;
 
-               wait_queue_head_t       inflight_wait;
                struct io_uring_sqe     *sq_sqes;
        } ____cacheline_aligned_in_smp;
 
@@ -1220,7 +1219,6 @@ static struct io_ring_ctx *io_ring_ctx_a
        INIT_LIST_HEAD(&ctx->iopoll_list);
        INIT_LIST_HEAD(&ctx->defer_list);
        INIT_LIST_HEAD(&ctx->timeout_list);
-       init_waitqueue_head(&ctx->inflight_wait);
        spin_lock_init(&ctx->inflight_lock);
        INIT_LIST_HEAD(&ctx->inflight_list);
        INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
@@ -5894,6 +5892,7 @@ static int io_req_defer(struct io_kiocb
 static void io_req_drop_files(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       struct io_uring_task *tctx = req->task->io_uring;
        unsigned long flags;
 
        if (req->work.flags & IO_WQ_WORK_FILES) {
@@ -5905,8 +5904,8 @@ static void io_req_drop_files(struct io_
        spin_unlock_irqrestore(&ctx->inflight_lock, flags);
        req->flags &= ~REQ_F_INFLIGHT;
        req->work.flags &= ~IO_WQ_WORK_FILES;
-       if (waitqueue_active(&ctx->inflight_wait))
-               wake_up(&ctx->inflight_wait);
+       if (atomic_read(&tctx->in_idle))
+               wake_up(&tctx->wait);
 }
 
 static void __io_clean_op(struct io_kiocb *req)
@@ -8605,8 +8604,8 @@ static void io_uring_cancel_files(struct
                        break;
                }
                if (found)
-                       prepare_to_wait(&ctx->inflight_wait, &wait,
-                                               TASK_UNINTERRUPTIBLE);
+                       prepare_to_wait(&task->io_uring->wait, &wait,
+                                       TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ctx->inflight_lock);
 
                /* We need to keep going until we don't find a matching req */
@@ -8619,7 +8618,7 @@ static void io_uring_cancel_files(struct
                /* cancellations _may_ trigger task work */
                io_run_task_work();
                schedule();
-               finish_wait(&ctx->inflight_wait, &wait);
+               finish_wait(&task->io_uring->wait, &wait);
        }
 }
 


Reply via email to