Eliminate the ->iomap_valid() handler by switching to a ->page_prepare()
handler and validating the mapping there.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/iomap/buffered-io.c | 25 +++++--------------------
 fs/xfs/xfs_iomap.c     | 38 +++++++++++++++++++++++++++-----------
 include/linux/iomap.h  | 17 -----------------
 3 files changed, 32 insertions(+), 48 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 819562633998..32a2a287d32c 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -628,7 +628,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
        const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
        struct folio *folio;
-       int status = 0;
+       int status;
 
        BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
        if (srcmap != &iter->iomap)
@@ -644,27 +644,12 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
                folio = page_ops->page_prepare(iter, pos, len);
        else
                folio = iomap_folio_prepare(iter, pos);
-       if (IS_ERR(folio))
-               return PTR_ERR(folio);
-
-       /*
-        * Now we have a locked folio, before we do anything with it we need to
-        * check that the iomap we have cached is not stale. The inode extent
-        * mapping can change due to concurrent IO in flight (e.g.
-        * IOMAP_UNWRITTEN state can change and memory reclaim could have
-        * reclaimed a previously partially written page at this index after IO
-        * completion before this write reaches this file offset) and hence we
-        * could do the wrong thing here (zero a page range incorrectly or fail
-        * to zero) and corrupt data.
-        */
-       if (page_ops && page_ops->iomap_valid) {
-               bool iomap_valid = page_ops->iomap_valid(iter->inode,
-                                                       &iter->iomap);
-               if (!iomap_valid) {
+       if (IS_ERR(folio)) {
+               if (folio == ERR_PTR(-ESTALE)) {
                        iter->iomap.flags |= IOMAP_F_STALE;
-                       status = 0;
-                       goto out_unlock;
+                       return 0;
                }
+               return PTR_ERR(folio);
        }
 
        if (pos + len > folio_pos(folio) + folio_size(folio))
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 669c1bc5c3a7..ae83cb89279d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -62,29 +62,45 @@ xfs_iomap_inode_sequence(
        return cookie | READ_ONCE(ip->i_df.if_seq);
 }
 
-/*
- * Check that the iomap passed to us is still valid for the given offset and
- * length.
- */
-static bool
-xfs_iomap_valid(
-       struct inode            *inode,
-       const struct iomap      *iomap)
+static struct folio *
+xfs_page_prepare(
+       struct iomap_iter       *iter,
+       loff_t                  pos,
+       unsigned                len)
 {
+       struct inode            *inode = iter->inode;
+       struct iomap            *iomap = &iter->iomap;
        struct xfs_inode        *ip = XFS_I(inode);
+       struct folio *folio;
 
+       folio = iomap_folio_prepare(iter, pos);
+       if (IS_ERR(folio))
+               return folio;
+
+       /*
+        * Now we have a locked folio, before we do anything with it we need to
+        * check that the iomap we have cached is not stale. The inode extent
+        * mapping can change due to concurrent IO in flight (e.g.
+        * IOMAP_UNWRITTEN state can change and memory reclaim could have
+        * reclaimed a previously partially written page at this index after IO
+        * completion before this write reaches this file offset) and hence we
+        * could do the wrong thing here (zero a page range incorrectly or fail
+        * to zero) and corrupt data.
+        */
        if (iomap->validity_cookie !=
                        xfs_iomap_inode_sequence(ip, iomap->flags)) {
                trace_xfs_iomap_invalid(ip, iomap);
-               return false;
+               folio_unlock(folio);
+               folio_put(folio);
+               return ERR_PTR(-ESTALE);
        }
 
        XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
-       return true;
+       return folio;
 }
 
 const struct iomap_page_ops xfs_iomap_page_ops = {
-       .iomap_valid            = xfs_iomap_valid,
+       .page_prepare           = xfs_page_prepare,
 };
 
 int
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index c74ab8c53b47..1c8b9a04b0bb 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -140,23 +140,6 @@ struct iomap_page_ops {
                        unsigned len);
        void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
                        struct folio *folio);
-
-       /*
-        * Check that the cached iomap still maps correctly to the filesystem's
-        * internal extent map. FS internal extent maps can change while iomap
-        * is iterating a cached iomap, so this hook allows iomap to detect that
-        * the iomap needs to be refreshed during a long running write
-        * operation.
-        *
-        * The filesystem can store internal state (e.g. a sequence number) in
-        * iomap->validity_cookie when the iomap is first mapped to be able to
-        * detect changes between mapping time and whenever .iomap_valid() is
-        * called.
-        *
-        * This is called with the folio over the specified file position held
-        * locked by the iomap code.
-        */
-       bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
 };
 
 /*
-- 
2.38.1

Reply via email to