From: Greg Kroah-Hartman <gre...@linuxfoundation.org>

From: Pavel Begunkov <asml.sile...@gmail.com>

commit eab30c4d20dc761d463445e5130421863ff81505 upstream

When io_req_task_work_add() fails, the request will be cancelled by
enqueueing it via the task_works of io-wq. Extract a helper for that.

Signed-off-by: Pavel Begunkov <asml.sile...@gmail.com>
Signed-off-by: Jens Axboe <ax...@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 fs/io_uring.c |   46 +++++++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)
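
For reference, the net effect at each converted call site is the
pattern sketched below. This is a sketch reusing the identifiers from
the diff, not code from the patch itself; the note about -ESRCH
reflects task_work_add() semantics around this kernel version, where
adding work to a task that has already run its exit-time task works
fails:

	ret = io_req_task_work_add(req);
	if (unlikely(ret)) {
		/*
		 * The target task is exiting, so task_work_add()
		 * failed (typically -ESRCH).  Punt the callback to
		 * io-wq's task instead, which is expected to still be
		 * alive, so the request gets cancelled rather than
		 * leaked.
		 */
		io_req_task_work_add_fallback(req, io_req_task_cancel);
	}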

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2172,6 +2172,16 @@ static int io_req_task_work_add(struct i
        return ret;
 }
 
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+                                         void (*cb)(struct callback_head *))
+{
+       struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
+
+       init_task_work(&req->task_work, cb);
+       task_work_add(tsk, &req->task_work, TWA_NONE);
+       wake_up_process(tsk);
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -2229,14 +2239,8 @@ static void io_req_task_queue(struct io_
        percpu_ref_get(&req->ctx->refs);
 
        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_req_task_cancel);
 }
 
 static inline void io_queue_next(struct io_kiocb *req)
@@ -2354,13 +2358,8 @@ static void io_free_req_deferred(struct
 
        init_task_work(&req->task_work, io_put_req_deferred_cb);
        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -3439,15 +3438,8 @@ static int io_async_buf_func(struct wait
        /* submit ref gets dropped, acquire a new one */
        refcount_inc(&req->refs);
        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               /* queue just for cancelation */
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_req_task_cancel);
        return 1;
 }
 
@@ -5159,12 +5151,8 @@ static int __io_async_wake(struct io_kio
         */
        ret = io_req_task_work_add(req);
        if (unlikely(ret)) {
-               struct task_struct *tsk;
-
                WRITE_ONCE(poll->canceled, true);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
+               io_req_task_work_add_fallback(req, func);
        }
        return 1;
 }
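
One asymmetry worth noting in the last hunk: unlike the other call
sites, which fall back to a fixed cancel/put callback, __io_async_wake()
marks the poll request cancelled and re-queues its original callback:

	WRITE_ONCE(poll->canceled, true);
	io_req_task_work_add_fallback(req, func);

Here func is the poll task_work handler that was passed in; the
expectation (not spelled out in this patch) is that it observes
poll->canceled and completes the request as cancelled once it finally
runs on the io-wq task.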

