From: SelvaKumar S <selvakuma...@samsung.com>

Repurpose [cqe->res, cqe->flags] into a single signed 64-bit field,
cqe->res64, to report the 64-bit written offset for zone append. An
appending write that requires the written location to be reported
(conveyed by the IOCB_ZONE_APPEND flag) is guaranteed not to be a short
write, which avoids the need to also report the number of bytes copied.

The append offset is returned by the lower layer to io_uring via the
ret2 argument of the ki_complete interface. Collect it there and convey
it to user space via cqe->res64.

Signed-off-by: SelvaKumar S <selvakuma...@samsung.com>
Signed-off-by: Kanchan Joshi <josh...@samsung.com>
Signed-off-by: Nitesh Shetty <nj.she...@samsung.com>
Signed-off-by: Javier Gonzalez <javier.g...@samsung.com>
---
 fs/io_uring.c                 | 49 ++++++++++++++++++++++++++++++++++++-------
 include/uapi/linux/io_uring.h |  9 ++++++--
 2 files changed, 48 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7809ab2..6510cf5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -401,7 +401,14 @@ struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
-       u64                             len;
+       union {
+               /*
+                * len is used only during submission.
+                * append_offset is used only during completion.
+                */
+               u64                     len;
+               u64                     append_offset;
+       };
 };
 
 struct io_connect {
@@ -541,6 +548,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_QUEUE_TIMEOUT_BIT,
        REQ_F_WORK_INITIALIZED_BIT,
+       REQ_F_ZONE_APPEND_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -598,6 +606,8 @@ enum {
        REQ_F_QUEUE_TIMEOUT     = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
        /* io_wq_work is initialized */
        REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
+       /* to return zone append offset */
+       REQ_F_ZONE_APPEND = BIT(REQ_F_ZONE_APPEND_BIT),
 };
 
 struct async_poll {
@@ -1244,8 +1254,15 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
                req->flags &= ~REQ_F_OVERFLOW;
                if (cqe) {
                        WRITE_ONCE(cqe->user_data, req->user_data);
-                       WRITE_ONCE(cqe->res, req->result);
-                       WRITE_ONCE(cqe->flags, req->cflags);
+                       if (unlikely(req->flags & REQ_F_ZONE_APPEND)) {
+                               if (likely(req->result > 0))
+                               WRITE_ONCE(cqe->res64, req->rw.append_offset);
+                               else
+                                       WRITE_ONCE(cqe->res64, req->result);
+                       } else {
+                               WRITE_ONCE(cqe->res, req->result);
+                               WRITE_ONCE(cqe->flags, req->cflags);
+                       }
                } else {
                        WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
@@ -1284,8 +1301,15 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
        cqe = io_get_cqring(ctx);
        if (likely(cqe)) {
                WRITE_ONCE(cqe->user_data, req->user_data);
-               WRITE_ONCE(cqe->res, res);
-               WRITE_ONCE(cqe->flags, cflags);
+               if (unlikely(req->flags & REQ_F_ZONE_APPEND)) {
+                       if (likely(res > 0))
+                               WRITE_ONCE(cqe->res64, req->rw.append_offset);
+                       else
+                               WRITE_ONCE(cqe->res64, res);
+               } else {
+                       WRITE_ONCE(cqe->res, res);
+                       WRITE_ONCE(cqe->flags, cflags);
+               }
        } else if (ctx->cq_overflow_flushed) {
                WRITE_ONCE(ctx->rings->cq_overflow,
                                atomic_inc_return(&ctx->cached_cq_overflow));
@@ -1943,7 +1967,7 @@ static inline void req_set_fail_links(struct io_kiocb *req)
                req->flags |= REQ_F_FAIL_LINK;
 }
 
-static void io_complete_rw_common(struct kiocb *kiocb, long res)
+static void io_complete_rw_common(struct kiocb *kiocb, long res, long long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        int cflags = 0;
@@ -1955,6 +1979,9 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
                req_set_fail_links(req);
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_kbuf(req);
+       if (req->flags & REQ_F_ZONE_APPEND)
+               req->rw.append_offset = res2;
+
        __io_cqring_add_event(req, res, cflags);
 }
 
@@ -1962,7 +1989,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-       io_complete_rw_common(kiocb, res);
+       io_complete_rw_common(kiocb, res, res2);
        io_put_req(req);
 }
 
@@ -1976,8 +2003,11 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long long res2)
        if (res != req->result)
                req_set_fail_links(req);
        req->result = res;
-       if (res != -EAGAIN)
+       if (res != -EAGAIN) {
+               if (req->flags & REQ_F_ZONE_APPEND)
+                       req->rw.append_offset = res2;
                WRITE_ONCE(req->iopoll_completed, 1);
+       }
 }
 
 /*
@@ -2739,6 +2769,9 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
+               /* zone-append requires few extra steps during completion */
+               if (kiocb->ki_flags & IOCB_ZONE_APPEND)
+                       req->flags |= REQ_F_ZONE_APPEND;
 
                if (!force_nonblock)
                        current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 92c2269..2580d93 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -156,8 +156,13 @@ enum {
  */
 struct io_uring_cqe {
        __u64   user_data;      /* sqe->data submission passed back */
-       __s32   res;            /* result code for this event */
-       __u32   flags;
+       union {
+               struct {
+                       __s32   res;    /* result code for this event */
+                       __u32   flags;
+               };
+               __s64   res64;  /* appending offset for zone append */
+       };
 };
 
 /*
-- 
2.7.4

Reply via email to