Use the common kiocb_write_and_wait and kiocb_invalidate_pages helpers
for direct I/O cache writeback and page invalidation instead of open
coding the logic.  This leads to a slight reordering of the checks in
__iomap_dio_rw to keep the logic straight.
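
For reference, the two helpers bundle the same filemap calls that were
open coded here.  A rough sketch of what they do (assumed from their
mm/filemap.c implementations; simplified, not verbatim):

  /* sketch only; not the exact mm/filemap.c code */
  int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
  {
          struct address_space *mapping = iocb->ki_filp->f_mapping;
          loff_t pos = iocb->ki_pos, end = pos + count - 1;

          if (iocb->ki_flags & IOCB_NOWAIT) {
                  /* must not block: only report pending writeback */
                  if (filemap_range_needs_writeback(mapping, pos, end))
                          return -EAGAIN;
                  return 0;
          }
          return filemap_write_and_wait_range(mapping, pos, end);
  }

  int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
  {
          struct address_space *mapping = iocb->ki_filp->f_mapping;
          loff_t pos = iocb->ki_pos, end = pos + count - 1;
          int ret;

          if (iocb->ki_flags & IOCB_NOWAIT) {
                  /* any cached page in the range could force us to block */
                  if (filemap_range_has_page(mapping, pos, end))
                          return -EAGAIN;
          } else {
                  ret = filemap_write_and_wait_range(mapping, pos, end);
                  if (ret)
                          return ret;
          }
          /* drop clean cached pages so later buffered reads go to disk */
          return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                               end >> PAGE_SHIFT);
  }

With the IOCB_NOWAIT and writeback handling folded into the helpers,
__iomap_dio_rw only has to set IOMAP_NOWAIT up front and turn a failed
invalidation into -ENOTBLK, passing -EAGAIN through unchanged.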

Signed-off-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Damien Le Moal <dlem...@kernel.org>
Reviewed-by: Hannes Reinecke <h...@suse.de>
Reviewed-by: Darrick J. Wong <djw...@kernel.org>
---
 fs/iomap/direct-io.c | 55 ++++++++++++++++----------------------------
 1 file changed, 20 insertions(+), 35 deletions(-)

diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 0795c54a745bca..6bd14691f96e07 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -472,7 +472,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
                unsigned int dio_flags, void *private, size_t done_before)
 {
-       struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct iomap_iter iomi = {
                .inode          = inode,
@@ -481,11 +480,11 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                .flags          = IOMAP_DIRECT,
                .private        = private,
        };
-       loff_t end = iomi.pos + iomi.len - 1, ret = 0;
        bool wait_for_completion =
                is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
        struct blk_plug plug;
        struct iomap_dio *dio;
+       loff_t ret = 0;
 
        trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);
 
@@ -509,31 +508,29 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        dio->submit.waiter = current;
        dio->submit.poll_bio = NULL;
 
+       if (iocb->ki_flags & IOCB_NOWAIT)
+               iomi.flags |= IOMAP_NOWAIT;
+
        if (iov_iter_rw(iter) == READ) {
                if (iomi.pos >= dio->i_size)
                        goto out_free_dio;
 
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (filemap_range_needs_writeback(mapping, iomi.pos,
-                               end)) {
-                               ret = -EAGAIN;
-                               goto out_free_dio;
-                       }
-                       iomi.flags |= IOMAP_NOWAIT;
-               }
-
                if (user_backed_iter(iter))
                        dio->flags |= IOMAP_DIO_DIRTY;
+
+               ret = kiocb_write_and_wait(iocb, iomi.len);
+               if (ret)
+                       goto out_free_dio;
        } else {
                iomi.flags |= IOMAP_WRITE;
                dio->flags |= IOMAP_DIO_WRITE;
 
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (filemap_range_has_page(mapping, iomi.pos, end)) {
-                               ret = -EAGAIN;
+               if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
+                       ret = -EAGAIN;
+                       if (iomi.pos >= dio->i_size ||
+                           iomi.pos + iomi.len > dio->i_size)
                                goto out_free_dio;
-                       }
-                       iomi.flags |= IOMAP_NOWAIT;
+                       iomi.flags |= IOMAP_OVERWRITE_ONLY;
                }
 
                /* for data sync or sync, we need sync completion processing */
@@ -549,31 +546,19 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        if (!(iocb->ki_flags & IOCB_SYNC))
                                dio->flags |= IOMAP_DIO_WRITE_FUA;
                }
-       }
-
-       if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
-               ret = -EAGAIN;
-               if (iomi.pos >= dio->i_size ||
-                   iomi.pos + iomi.len > dio->i_size)
-                       goto out_free_dio;
-               iomi.flags |= IOMAP_OVERWRITE_ONLY;
-       }
 
-       ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
-       if (ret)
-               goto out_free_dio;
-
-       if (iov_iter_rw(iter) == WRITE) {
                /*
                 * Try to invalidate cache pages for the range we are writing.
                 * If this invalidation fails, let the caller fall back to
                 * buffered I/O.
                 */
-               if (invalidate_inode_pages2_range(mapping,
-                               iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
-                       trace_iomap_dio_invalidate_fail(inode, iomi.pos,
-                                                       iomi.len);
-                       ret = -ENOTBLK;
+               ret = kiocb_invalidate_pages(iocb, iomi.len);
+               if (ret) {
+                       if (ret != -EAGAIN) {
+                               trace_iomap_dio_invalidate_fail(inode, iomi.pos,
+                                                               iomi.len);
+                               ret = -ENOTBLK;
+                       }
                        goto out_free_dio;
                }
 
-- 
2.39.2
