Add a second xarray struct to netfs_io_request to hold a bounce buffer for
when we have to deal with encrypted or compressed data, or when we have to
upload/download data in blocks larger than we were asked for.
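
For illustration only (not part of this patch): a filesystem that stores
its data encrypted might opt in from its ->init_request() hook by setting
the new flag.  How the bounce xarray gets populated with folios is left to
later patches; the MYFS_I()/->encrypted names below are hypothetical.

	/* Hypothetical opt-in: the flag and rreq->bounce come from this
	 * patch; MYFS_I() and ->encrypted are made up for illustration.
	 */
	static int myfs_init_request(struct netfs_io_request *rreq,
				     struct file *file)
	{
		if (MYFS_I(rreq->inode)->encrypted)
			__set_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags);
		return 0;
	}

With the flag set, netfs_begin_read() points rreq->io_iter at the bounce
xarray rather than at rreq->iter, so subrequest I/O lands in the bounce
pages and can then be decrypted/decompressed into the user-visible buffer.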

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
 fs/netfs/io.c         | 6 +++++-
 fs/netfs/objects.c    | 3 +++
 include/linux/netfs.h | 2 ++
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index e9d408e211b8..d8e9cd6ce338 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -643,7 +643,11 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
                return -EIO;
        }
 
-       rreq->io_iter = rreq->iter;
+       if (test_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags))
+               iov_iter_xarray(&rreq->io_iter, ITER_DEST, &rreq->bounce,
+                               rreq->start, rreq->len);
+       else
+               rreq->io_iter = rreq->iter;
 
        INIT_WORK(&rreq->work, netfs_rreq_work);
 
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 4396318081bf..0782a284dda8 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -35,6 +35,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
        rreq->inode     = inode;
        rreq->i_size    = i_size_read(inode);
        rreq->debug_id  = atomic_inc_return(&debug_ids);
+       xa_init(&rreq->bounce);
        INIT_LIST_HEAD(&rreq->subrequests);
        refcount_set(&rreq->ref, 1);
        __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
@@ -43,6 +44,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
        if (rreq->netfs_ops->init_request) {
                ret = rreq->netfs_ops->init_request(rreq, file);
                if (ret < 0) {
+                       xa_destroy(&rreq->bounce);
                        kfree(rreq);
                        return ERR_PTR(ret);
                }
@@ -96,6 +98,7 @@ static void netfs_free_request(struct work_struct *work)
                }
                kvfree(rreq->direct_bv);
        }
+       netfs_clear_buffer(&rreq->bounce);
        kfree_rcu(rreq, rcu);
        netfs_stat_d(&netfs_n_rh_rreq);
 }
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index e8d702ac6968..a7220e906287 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -196,6 +196,7 @@ struct netfs_io_request {
        struct iov_iter         iter;           /* Unencrypted-side iterator */
        struct iov_iter         io_iter;        /* I/O (Encrypted-side) iterator */
        struct bio_vec          *direct_bv;     /* DIO buffer list (when handling iovec-iter) */
+       struct xarray           bounce;         /* Bounce buffer (eg. for crypto/compression) */
        void                    *netfs_priv;    /* Private data for the netfs */
        unsigned int            direct_bv_count; /* Number of elements in bv[] */
        unsigned int            debug_id;
@@ -220,6 +221,7 @@ struct netfs_io_request {
 #define NETFS_RREQ_IN_PROGRESS         5       /* Unlocked when the request completes */
 #define NETFS_RREQ_NONBLOCK            6       /* Don't block if possible (O_NONBLOCK) */
 #define NETFS_RREQ_BLOCKED             7       /* We blocked */
+#define NETFS_RREQ_USE_BOUNCE_BUFFER   8       /* Use bounce buffer */
        const struct netfs_request_ops *netfs_ops;
 };
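
Note: netfs_clear_buffer(), called from netfs_free_request() above, is
introduced elsewhere in this series.  A minimal sketch of its assumed
behaviour, dropping the reference on each folio held in the bounce xarray
and then tearing the xarray down:

	/* Sketch (assumed): drop every folio in the buffer, then destroy it. */
	void netfs_clear_buffer(struct xarray *buffer)
	{
		struct folio *folio;
		XA_STATE(xas, buffer, 0);

		rcu_read_lock();
		xas_for_each(&xas, folio, ULONG_MAX)
			folio_put(folio);
		rcu_read_unlock();
		xa_destroy(buffer);
	}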
 