Provide tools to create a buffer in an xarray, with a function to add
new folios with a mark.  This will be used to create a bounce buffer and
can also be used to build a list of folios whose span would require more
than a page's worth of bio_vec structs.
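
As a rough sketch of the intended call pattern (illustrative only:
mapping, ractl, folio, have_index and have_folios stand in for state
that the actual callers, added by later patches, will supply):

	struct xarray buffer;
	int ret;

	xa_init(&buffer);

	/* Attach the folios proposed by the VM and allocate filler
	 * folios to make the region contiguous; all are marked for
	 * putting later by netfs_clear_buffer().
	 */
	ret = netfs_set_up_buffer(&buffer, mapping, ractl, folio,
				  have_index, have_folios);
	if (ret < 0)
		goto out;

	/* ... read or decrypt/decompress into the buffer ... */

out:
	/* Put the folios marked NETFS_BUF_PUT_MARK and empty the
	 * xarray.
	 */
	netfs_clear_buffer(&buffer);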

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
 fs/netfs/internal.h   |  16 +++++
 fs/netfs/misc.c       | 140 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/netfs.h |   4 ++
 3 files changed, 160 insertions(+)

diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 1f067aa96c50..00e01278316f 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -52,6 +52,22 @@ static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
 static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
 #endif
 
+/*
+ * misc.c
+ */
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+                           struct folio *folio, bool put_mark,
+                           bool pagecache_mark, gfp_t gfp_mask);
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+                              struct address_space *mapping,
+                              pgoff_t index, pgoff_t to, gfp_t gfp_mask);
+int netfs_set_up_buffer(struct xarray *buffer,
+                       struct address_space *mapping,
+                       struct readahead_control *ractl,
+                       struct folio *keep,
+                       pgoff_t have_index, unsigned int have_folios);
+void netfs_clear_buffer(struct xarray *buffer);
+
 /*
  * objects.c
  */
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index c3baf2b247d9..c70f856f3129 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,6 +8,146 @@
 #include <linux/swap.h>
 #include "internal.h"
 
+/*
+ * Attach a folio to the buffer and maybe set marks on it to say that we need
+ * to put the folio later and twiddle the pagecache flags.
+ */
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+                           struct folio *folio, bool put_mark,
+                           bool pagecache_mark, gfp_t gfp_mask)
+{
+       XA_STATE_ORDER(xas, xa, index, folio_order(folio));
+
+retry:
+       xas_lock(&xas);
+       for (;;) {
+               xas_store(&xas, folio);
+               if (!xas_error(&xas))
+                       break;
+               xas_unlock(&xas);
+               if (!xas_nomem(&xas, gfp_mask))
+                       return xas_error(&xas);
+               goto retry;
+       }
+
+       if (put_mark)
+               xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
+       if (pagecache_mark)
+               xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
+       xas_unlock(&xas);
+       return xas_error(&xas);
+}
+
+/*
+ * Create the specified range of folios in the buffer attached to the read
+ * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know that
+ * these need freeing later.
+ */
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+                              struct address_space *mapping,
+                              pgoff_t index, pgoff_t to, gfp_t gfp_mask)
+{
+       struct folio *folio;
+       int ret;
+
+       if (to + 1 == index) /* Page range is inclusive */
+               return 0;
+
+       do {
+               /* TODO: Figure out what order folio can be allocated here */
+               folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
+               if (!folio)
+                       return -ENOMEM;
+               folio->index = index;
+               ret = netfs_xa_store_and_mark(buffer, index, folio,
+                                             true, false, gfp_mask);
+               if (ret < 0) {
+                       folio_put(folio);
+                       return ret;
+               }
+
+               index += folio_nr_pages(folio);
+       } while (index <= to && index != 0);
+
+       return 0;
+}
+
+/*
+ * Set up a buffer into which data will be read or decrypted/decompressed.
+ * The folios to be read into are attached to this buffer and the gaps filled
+ * in to form a contiguous region.
+ */
+int netfs_set_up_buffer(struct xarray *buffer,
+                       struct address_space *mapping,
+                       struct readahead_control *ractl,
+                       struct folio *keep,
+                       pgoff_t have_index, unsigned int have_folios)
+{
+       struct folio *folio;
+       gfp_t gfp_mask = readahead_gfp_mask(mapping);
+       unsigned int want_folios = have_folios;
+       pgoff_t want_index = have_index;
+       int ret;
+
+       ret = netfs_add_folios_to_buffer(buffer, mapping, want_index,
+                                        have_index - 1, gfp_mask);
+       if (ret < 0)
+               return ret;
+       have_folios += have_index - want_index;
+
+       ret = netfs_add_folios_to_buffer(buffer, mapping,
+                                        have_index + have_folios,
+                                        want_index + want_folios - 1,
+                                        gfp_mask);
+       if (ret < 0)
+               return ret;
+
+       /* Transfer the folios proposed by the VM into the buffer and take refs
+        * on them.  The locks will be dropped in netfs_rreq_unlock().
+        */
+       if (ractl) {
+               while ((folio = readahead_folio(ractl))) {
+                       folio_get(folio);
+                       if (folio == keep)
+                               folio_get(folio);
+                       ret = netfs_xa_store_and_mark(buffer, folio->index, folio,
+                                                     true, true, gfp_mask);
+                       if (ret < 0) {
+                               if (folio != keep)
+                                       folio_unlock(folio);
+                               folio_put(folio);
+                               return ret;
+                       }
+               }
+       } else {
+               folio_get(keep);
+               ret = netfs_xa_store_and_mark(buffer, keep->index, keep,
+                                             true, true, gfp_mask);
+               if (ret < 0) {
+                       folio_put(keep);
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Clear an xarray buffer, putting a ref on the folios that have
+ * NETFS_BUF_PUT_MARK set.
+ */
+void netfs_clear_buffer(struct xarray *buffer)
+{
+       struct folio *folio;
+       XA_STATE(xas, buffer, 0);
+
+       rcu_read_lock();
+       xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
+               folio_put(folio);
+       }
+       rcu_read_unlock();
+       xa_destroy(buffer);
+}
+
 /**
  * netfs_invalidate_folio - Invalidate or partially invalidate a folio
  * @folio: Folio proposed for release
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 66479a61ad00..e8d702ac6968 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -109,6 +109,10 @@ static inline int wait_on_page_fscache_killable(struct page *page)
        return folio_wait_private_2_killable(page_folio(page));
 }
 
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK     XA_MARK_0       /* - Page needs putting  */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1     /* - Page needs wb/dirty flag wrangling */
+
 enum netfs_io_source {
        NETFS_FILL_WITH_ZEROES,
        NETFS_DOWNLOAD_FROM_SERVER,