From: Pavel Begunkov <asml.sile...@gmail.com>

[ Upstream commit e1915f76a8981f0a750cf56515df42582a37c4b0 ]

As io_uring_cancel_files() and others let SQO run between calls to
io_uring_try_cancel_requests(), SQO may generate new deferred requests,
so it's safer to try to cancel them in io_uring_try_cancel_requests()
itself.

Signed-off-by: Pavel Begunkov <asml.sile...@gmail.com>
Signed-off-by: Jens Axboe <ax...@kernel.dk>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 fs/io_uring.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 241313278e5a..89708ffc1c2b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8848,11 +8848,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
        return ret;
 }
 
-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
                                  struct task_struct *task,
                                  struct files_struct *files)
 {
-       struct io_defer_entry *de = NULL;
+       struct io_defer_entry *de;
        LIST_HEAD(list);
 
        spin_lock_irq(&ctx->completion_lock);
@@ -8863,6 +8863,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
                }
        }
        spin_unlock_irq(&ctx->completion_lock);
+       if (list_empty(&list))
+               return false;
 
        while (!list_empty(&list)) {
                de = list_first_entry(&list, struct io_defer_entry, list);
@@ -8872,6 +8874,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
                io_req_complete(de->req, -ECANCELED);
                kfree(de);
        }
+       return true;
 }
 
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
@@ -8898,6 +8901,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                        }
                }
 
+               ret |= io_cancel_defer_files(ctx, task, files);
                ret |= io_poll_remove_all(ctx, task, files);
                ret |= io_kill_timeouts(ctx, task, files);
                ret |= io_run_task_work();
@@ -8976,8 +8980,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
                io_sq_thread_park(ctx->sq_data);
        }
 
-       io_cancel_defer_files(ctx, task, files);
-
        io_uring_cancel_files(ctx, task, files);
        if (!files)
                io_uring_try_cancel_requests(ctx, task, NULL);
-- 
2.30.1
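
For readers not familiar with the cancellation path: the reason
io_cancel_defer_files() now returns bool is that io_uring_try_cancel_requests()
retries its cancellation passes until none of them finds outstanding work, so a
pass that cancelled something must report it. Below is a minimal user-space
sketch of that retry pattern, not the kernel code; the queue structure and
function names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct queue { int pending; };

/* Cancel everything currently queued; return true if anything was cancelled. */
static bool cancel_pass(struct queue *q, const char *name)
{
	if (q->pending == 0)
		return false;
	printf("cancelled %d %s request(s)\n", q->pending, name);
	q->pending = 0;
	return true;
}

static void try_cancel_requests(struct queue *defer, struct queue *poll,
				struct queue *timeout)
{
	bool ret;

	do {
		ret = false;
		/* Deferred requests are handled inside the retry loop, so
		 * anything queued concurrently is caught on a later pass. */
		ret |= cancel_pass(defer, "deferred");
		ret |= cancel_pass(poll, "poll");
		ret |= cancel_pass(timeout, "timeout");
	} while (ret);	/* stop once a full pass finds nothing left */
}

int main(void)
{
	struct queue defer = { .pending = 2 };
	struct queue poll = { .pending = 1 };
	struct queue timeout = { .pending = 0 };

	try_cancel_requests(&defer, &poll, &timeout);
	return 0;
}

In the patch, moving io_cancel_defer_files() into this loop (and out of
io_uring_cancel_task_requests()) means deferred requests generated by SQO
between passes are cancelled on the next pass rather than missed.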
