Introduce a PAGE_MAPPING_DAX_COW flag to support association with CoW file
mappings.  In this case, since dax-rmap has already taken over the job of
looking up the shared files for a given dax page, page->mapping is no longer
used for rmap but to mark that this dax page is shared.  To make sure
disassociation works correctly, use page->index as a refcount and reset
page->mapping to its initial state once page->index drops to 0.

With the help of this new flag, we can distinguish the normal case from the
CoW case and keep the warning in the normal case.

Signed-off-by: Shiyang Ruan <ruansy.f...@fujitsu.com>
Reviewed-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Darrick J. Wong <djw...@kernel.org>
---
 fs/dax.c                   | 50 +++++++++++++++++++++++++++++++-------
 include/linux/page-flags.h |  6 +++++
 2 files changed, 47 insertions(+), 9 deletions(-)
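
For illustration only, here is a rough userspace model of the refcount scheme
described in the commit message.  The struct page and the model_* helpers
below are simplified, hypothetical stand-ins for the real
dax_associate_entry()/dax_disassociate_entry() paths and are not part of this
patch:

#include <assert.h>
#include <stddef.h>

#define PAGE_MAPPING_DAX_COW	0x1UL

struct page {
	void *mapping;		/* address_space pointer, or the CoW sentinel */
	unsigned long index;	/* file offset, reused as refcount when shared */
};

/* Model of associating a shared (CoW) dax page with one more file. */
static void model_associate_cow(struct page *page)
{
	if ((unsigned long)page->mapping != PAGE_MAPPING_DAX_COW) {
		/* Previously mapped regularly: restart the count. */
		if (page->mapping)
			page->index = 1;
		page->mapping = (void *)PAGE_MAPPING_DAX_COW;
	}
	page->index++;
}

/* Model of disassociating one file; resets the page once unshared. */
static void model_disassociate(struct page *page)
{
	if ((unsigned long)page->mapping == PAGE_MAPPING_DAX_COW &&
	    page->index-- > 0)
		return;			/* still shared, keep the marking */
	page->mapping = NULL;		/* back to the initial state */
	page->index = 0;
}

int main(void)
{
	struct page p = { NULL, 0 };

	model_associate_cow(&p);	/* first file */
	model_associate_cow(&p);	/* reflinked second file */
	model_disassociate(&p);		/* one file gone, still shared */
	assert((unsigned long)p.mapping == PAGE_MAPPING_DAX_COW);
	return 0;
}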

diff --git a/fs/dax.c b/fs/dax.c
index 57efd3f73655..4d3dfc8bee33 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -334,13 +334,35 @@ static unsigned long dax_end_pfn(void *entry)
        for (pfn = dax_to_pfn(entry); \
                        pfn < dax_end_pfn(entry); pfn++)
 
+static inline bool dax_mapping_is_cow(struct address_space *mapping)
+{
+       return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
+}
+
 /*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set page->mapping to PAGE_MAPPING_DAX_COW and increase the refcount.
+ */
+static inline void dax_mapping_set_cow(struct page *page)
+{
+       if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
+               /*
+                * Reset the index if the page was already mapped
+                * regularly before.
+                */
+               if (page->mapping)
+                       page->index = 1;
+               page->mapping = (void *)PAGE_MAPPING_DAX_COW;
+       }
+       page->index++;
+}
+
+/*
+ * When called from dax_insert_entry(), the cow flag indicates whether this
+ * entry is shared by multiple files.  If so, set page->mapping to
+ * PAGE_MAPPING_DAX_COW and use page->index as a refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-               struct vm_area_struct *vma, unsigned long address)
+               struct vm_area_struct *vma, unsigned long address, bool cow)
 {
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;
@@ -352,9 +374,13 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
-               WARN_ON_ONCE(page->mapping);
-               page->mapping = mapping;
-               page->index = index + i++;
+               if (cow) {
+                       dax_mapping_set_cow(page);
+               } else {
+                       WARN_ON_ONCE(page->mapping);
+                       page->mapping = mapping;
+                       page->index = index + i++;
+               }
        }
 }
 
@@ -370,7 +396,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                struct page *page = pfn_to_page(pfn);
 
                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-               WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+               if (dax_mapping_is_cow(page->mapping)) {
+                       /* keep the CoW flag if this page is still shared */
+                       if (page->index-- > 0)
+                               continue;
+               } else
+                       WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
                page->index = 0;
        }
@@ -829,7 +860,8 @@ static void *dax_insert_entry(struct xa_state *xas,
                void *old;
 
                dax_disassociate_entry(entry, mapping, false);
-               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+                               false);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index fe47ee8dc258..cad9aeb5e75c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -650,6 +650,12 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
 #define PAGE_MAPPING_KSM       (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 #define PAGE_MAPPING_FLAGS     (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 
+/*
+ * Different from the flags above, this flag is only used in fsdax mode.  It
+ * indicates that the page is associated with CoW (reflinked) file mappings.
+ */
+#define PAGE_MAPPING_DAX_COW   0x1
+
 static __always_inline int PageMappingFlags(struct page *page)
 {
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
-- 
2.35.1