io_uring is the only user of io-wq, and now it uses only io-wq callback
for all its requests, namely io_wq_submit_work(). Instead of storing
work->runner callback in each instance of io_wq_work, keep it in io-wq
itself.

pros:
- reduces io_wq_work size
- more robust -- ->func won't be invalidated with mem{cpy,set}(req)
- helps other work

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io-wq.c    | 10 ++++++----
 fs/io-wq.h    |  7 ++++---
 fs/io_uring.c |  3 ++-
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index 2bfa9117bc28..a44ad3b98886 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -112,6 +112,7 @@ struct io_wq {
        unsigned long state;
 
        free_work_fn *free_work;
+       io_wq_work_fn *do_work;
 
        struct task_struct *manager;
        struct user_struct *user;
@@ -528,7 +529,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 
                        hash = io_get_work_hash(work);
                        linked = old_work = work;
-                       linked->func(&linked);
+                       wq->do_work(&linked);
                        linked = (old_work == linked) ? NULL : linked;
 
                        work = next_hashed;
@@ -785,7 +786,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
                struct io_wq_work *old_work = work;
 
                work->flags |= IO_WQ_WORK_CANCEL;
-               work->func(&work);
+               wq->do_work(&work);
                work = (work == old_work) ? NULL : work;
                wq->free_work(old_work);
        } while (work);
@@ -1027,7 +1028,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
        int ret = -ENOMEM, node;
        struct io_wq *wq;
 
-       if (WARN_ON_ONCE(!data->free_work))
+       if (WARN_ON_ONCE(!data->free_work || !data->do_work))
                return ERR_PTR(-EINVAL);
 
        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
@@ -1041,6 +1042,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
        }
 
        wq->free_work = data->free_work;
+       wq->do_work = data->do_work;
 
        /* caller must already hold a reference to this */
        wq->user = data->user;
@@ -1097,7 +1099,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 
 bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
 {
-       if (data->free_work != wq->free_work)
+       if (data->free_work != wq->free_work || data->do_work != wq->do_work)
                return false;
 
        return refcount_inc_not_zero(&wq->use_refs);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index df8a4cd3236d..f3bb596f5a3f 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list,
 
 struct io_wq_work {
        struct io_wq_work_node list;
-       void (*func)(struct io_wq_work **);
        struct files_struct *files;
        struct mm_struct *mm;
        const struct cred *creds;
@@ -94,9 +93,9 @@ struct io_wq_work {
        pid_t task_pid;
 };
 
-#define INIT_IO_WORK(work, _func)                              \
+#define INIT_IO_WORK(work)                                     \
        do {                                                    \
-               *(work) = (struct io_wq_work){ .func = _func }; \
+               *(work) = (struct io_wq_work){};                \
        } while (0)                                             \
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
@@ -108,10 +107,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 }
 
 typedef void (free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work **);
 
 struct io_wq_data {
        struct user_struct *user;
 
+       io_wq_work_fn *do_work;
        free_work_fn *free_work;
 };
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index adf18ff9fdb9..b4ca6026269c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5880,7 +5880,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        refcount_set(&req->refs, 2);
        req->task = NULL;
        req->result = 0;
-       INIT_IO_WORK(&req->work, io_wq_submit_work);
+       INIT_IO_WORK(&req->work);
 
        if (unlikely(req->opcode >= IORING_OP_LAST))
                return -EINVAL;
@@ -6896,6 +6896,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
 
        data.user = ctx->user;
        data.free_work = io_free_work;
+       data.do_work = io_wq_submit_work;
 
        if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
                /* Do QD, or 4 * CPUS, whatever is smallest */
-- 
2.24.0

Reply via email to