Introduce xfs_mmaplock_two_inodes_and_break_dax_layout() for DAX files
that are going to be deduped.  With that in place, call the compare-range
function only when both files are DAX or both are non-DAX.

Signed-off-by: Shiyang Ruan <ruansy.f...@fujitsu.com>
Reviewed-by: Darrick J. Wong <djw...@kernel.org>
Reviewed-by: Christoph Hellwig <h...@lst.de>
---
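
[Not for the commit log]  For anyone testing this, below is a minimal
userspace sketch that drives the dedupe path via the FIDEDUPERANGE
ioctl.  The mount point and file names (/mnt/dax/a, /mnt/dax/b) are
hypothetical; the sketch assumes an XFS filesystem mounted with -o dax
so that both inodes take the new MMAPLOCK path:

	/* Hypothetical test program: dedupe the first 64KiB of two DAX files. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>

	int main(void)
	{
		/* Both files are assumed to live on a dax-mounted XFS. */
		int src = open("/mnt/dax/a", O_RDONLY);
		int dst = open("/mnt/dax/b", O_RDWR);
		struct file_dedupe_range *arg;
		int ret = 1;

		if (src < 0 || dst < 0) {
			perror("open");
			return 1;
		}

		/* One source range, one destination range. */
		arg = calloc(1, sizeof(*arg) +
				sizeof(struct file_dedupe_range_info));
		if (!arg)
			return 1;

		arg->src_offset = 0;
		arg->src_length = 65536;
		arg->dest_count = 1;
		arg->info[0].dest_fd = dst;
		arg->info[0].dest_offset = 0;

		/* With this patch, dedupe succeeds only if both files are DAX
		 * or both are non-DAX; mixed pairs are rejected in
		 * xfs_reflink_remap_prep(). */
		if (ioctl(src, FIDEDUPERANGE, arg) < 0) {
			perror("FIDEDUPERANGE");
		} else if (arg->info[0].status == FILE_DEDUPE_RANGE_SAME) {
			printf("deduped %llu bytes\n",
			       (unsigned long long)arg->info[0].bytes_deduped);
			ret = 0;
		} else {
			printf("ranges differ or dedupe refused (status %d)\n",
			       arg->info[0].status);
		}

		free(arg);
		close(src);
		close(dst);
		return ret;
	}
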
 fs/xfs/xfs_file.c    |  2 +-
 fs/xfs/xfs_inode.c   | 80 +++++++++++++++++++++++++++++++++++++++++---
 fs/xfs/xfs_inode.h   |  1 +
 fs/xfs/xfs_reflink.c |  4 +--
 4 files changed, 80 insertions(+), 7 deletions(-)

diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2ef1930374d2..c3061723613c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -846,7 +846,7 @@ xfs_wait_dax_page(
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 }
 
-static int
+int
 xfs_break_dax_layouts(
        struct inode            *inode,
        bool                    *retry)
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a4f6f034fb81..bdc084cdbf46 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3790,6 +3790,61 @@ xfs_iolock_two_inodes_and_break_layout(
        return 0;
 }
 
+static int
+xfs_mmaplock_two_inodes_and_break_dax_layout(
+       struct xfs_inode        *ip1,
+       struct xfs_inode        *ip2)
+{
+       int                     error, attempts = 0;
+       bool                    retry;
+       struct page             *page;
+       struct xfs_log_item     *lp;
+
+       if (ip1->i_ino > ip2->i_ino)
+               swap(ip1, ip2);
+
+again:
+       retry = false;
+       /* Lock the first inode */
+       xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
+       error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
+       if (error || retry) {
+               xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+               if (error == 0 && retry)
+                       goto again;
+               return error;
+       }
+
+       if (ip1 == ip2)
+               return 0;
+
+       /* Nested lock the second inode */
+       lp = &ip1->i_itemp->ili_item;
+       if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
+               if (!xfs_ilock_nowait(ip2,
+                   xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1))) {
+                       xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+                       if ((++attempts % 5) == 0)
+                               delay(1); /* Don't just spin the CPU */
+                       goto again;
+               }
+       } else
+               xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
+       /*
+        * We cannot use xfs_break_dax_layouts() directly here because it may
+        * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
+        * for this nested lock case.
+        */
+       page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
+       if (page && page_ref_count(page) != 1) {
+               xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+               xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+               goto again;
+       }
+
+       return 0;
+}
+
 /*
  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
  * mmap activity.
@@ -3804,8 +3859,19 @@ xfs_ilock2_io_mmap(
        ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
        if (ret)
                return ret;
-       filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
-                                   VFS_I(ip2)->i_mapping);
+
+       if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
+               ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
+               if (ret) {
+                       inode_unlock(VFS_I(ip2));
+                       if (ip1 != ip2)
+                               inode_unlock(VFS_I(ip1));
+                       return ret;
+               }
+       } else
+               filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
+                                           VFS_I(ip2)->i_mapping);
+
        return 0;
 }
 
@@ -3815,8 +3881,14 @@ xfs_iunlock2_io_mmap(
        struct xfs_inode        *ip1,
        struct xfs_inode        *ip2)
 {
-       filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
-                                     VFS_I(ip2)->i_mapping);
+       if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
+               xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+               if (ip1 != ip2)
+                       xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+       } else
+               filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
+                                             VFS_I(ip2)->i_mapping);
+
        inode_unlock(VFS_I(ip2));
        if (ip1 != ip2)
                inode_unlock(VFS_I(ip1));
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index b21b177832d1..f7e26fe31a26 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -472,6 +472,7 @@ enum xfs_prealloc_flags {
 
 int    xfs_update_prealloc_flags(struct xfs_inode *ip,
                                  enum xfs_prealloc_flags flags);
+int    xfs_break_dax_layouts(struct inode *inode, bool *retry);
 int    xfs_break_layouts(struct inode *inode, uint *iolock,
                enum layout_break_reason reason);
 
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 9d876e268734..3b99c9dfcf0d 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1327,8 +1327,8 @@ xfs_reflink_remap_prep(
        if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
                goto out_unlock;
 
-       /* Don't share DAX file data for now. */
-       if (IS_DAX(inode_in) || IS_DAX(inode_out))
+       /* Don't share DAX file data with non-DAX file. */
+       if (IS_DAX(inode_in) != IS_DAX(inode_out))
                goto out_unlock;
 
        if (!IS_DAX(inode_in))
-- 
2.33.0