On Tue, Apr 19, 2022 at 12:50:45PM +0800, Shiyang Ruan wrote:
> Introduce a PAGE_MAPPING_DAX_COW flag to support association with CoW file
> mappings.  In this case, since the dax-rmap has already taken
> responsibility for looking up shared files by a given dax page,
> page->mapping is no longer used for rmap but for marking that
> this dax page is shared.  And to make sure disassociation works fine, we
> use page->index as a refcount, and clear page->mapping back to the initial
> state when page->index decreases to 0.
> 
> With the help of this new flag, it is possible to distinguish the normal
> case from the CoW case, and keep the warning in the normal case.
> 
> ==
> PS: The @cow argument added to dax_associate_entry() is used to let it know
> whether the entry is to be shared during an iomap operation.  It is decided
> by the iomap and srcmap flags, and will be used in another patchset
> (fsdax,xfs: Add reflink&dedupe support for fsdax [1]).
> 
> In this patch, we set @cow always false for now.
> 
> [1] 
> https://lore.kernel.org/linux-xfs/20210928062311.4012070-1-ruansy.f...@fujitsu.com/
> ==
> 
> Signed-off-by: Shiyang Ruan <ruansy.f...@fujitsu.com>
> ---
>  fs/dax.c                   | 50 +++++++++++++++++++++++++++++++-------
>  include/linux/page-flags.h |  6 +++++
>  2 files changed, 47 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/dax.c b/fs/dax.c
> index 57efd3f73655..4d3dfc8bee33 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -334,13 +334,35 @@ static unsigned long dax_end_pfn(void *entry)
>       for (pfn = dax_to_pfn(entry); \
>                       pfn < dax_end_pfn(entry); pfn++)
>  
> +static inline bool dax_mapping_is_cow(struct address_space *mapping)
> +{
> +     return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
> +}
> +
>  /*
> - * TODO: for reflink+dax we need a way to associate a single page with
> - * multiple address_space instances at different linear_page_index()
> - * offsets.
> + * Set page->mapping with the PAGE_MAPPING_DAX_COW flag and increase the refcount.
> + */
> +static inline void dax_mapping_set_cow(struct page *page)
> +{
> +     if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
> +             /*
> +              * Reset the index if the page was already mapped
> +              * regularly before.
> +              */
> +             if (page->mapping)
> +                     page->index = 1;
> +             page->mapping = (void *)PAGE_MAPPING_DAX_COW;
> +     }
> +     page->index++;
> +}
> +
> +/*
> + * When it is called in dax_insert_entry(), the cow flag will indicate
> + * whether this entry is shared by multiple files.  If so, set the
> + * page->mapping to PAGE_MAPPING_DAX_COW, and use page->index as a refcount.
>   */
>  static void dax_associate_entry(void *entry, struct address_space *mapping,
> -             struct vm_area_struct *vma, unsigned long address)
> +             struct vm_area_struct *vma, unsigned long address, bool cow)
>  {
>       unsigned long size = dax_entry_size(entry), pfn, index;
>       int i = 0;
> @@ -352,9 +374,13 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
>       for_each_mapped_pfn(entry, pfn) {
>               struct page *page = pfn_to_page(pfn);
>  
> -             WARN_ON_ONCE(page->mapping);
> -             page->mapping = mapping;
> -             page->index = index + i++;
> +             if (cow) {
> +                     dax_mapping_set_cow(page);
> +             } else {
> +                     WARN_ON_ONCE(page->mapping);
> +                     page->mapping = mapping;
> +                     page->index = index + i++;
> +             }
>       }
>  }
>  
> @@ -370,7 +396,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
>               struct page *page = pfn_to_page(pfn);
>  
>               WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
> -             WARN_ON_ONCE(page->mapping && page->mapping != mapping);
> +             if (dax_mapping_is_cow(page->mapping)) {
> +                     /* keep the CoW flag if this page is still shared */
> +                     if (page->index-- > 0)
> +                             continue;
> +             } else
> +                     WARN_ON_ONCE(page->mapping && page->mapping != mapping);
>               page->mapping = NULL;
>               page->index = 0;
>       }
> @@ -829,7 +860,8 @@ static void *dax_insert_entry(struct xa_state *xas,
>               void *old;
>  
>               dax_disassociate_entry(entry, mapping, false);
> -             dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
> +             dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
> +                             false);
>               /*
>                * Only swap our new entry into the page cache if the current
>                * entry is a zero page or an empty entry.  If a normal PTE or
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index d725a2d17806..5b601e375773 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -650,6 +650,12 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
>  #define PAGE_MAPPING_KSM     (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
>  #define PAGE_MAPPING_FLAGS   (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
>  
> +/*
> + * Unlike the flags above, this flag is used only in fsdax mode.  It
> + * indicates that this page->mapping now marks a shared (reflink/CoW) dax page.
> + */
> +#define PAGE_MAPPING_DAX_COW 0x1

The logic looks sound enough, I guess.

Though I do wonder -- if this were defined like this:

#define PAGE_MAPPING_DAX_COW    ((struct address_space *)0x1)

Could you then avoid all uintptr_t/unsigned long casts above?

It's probably not worth holding up the whole patchset though, so
Reviewed-by: Darrick J. Wong <djw...@kernel.org>

--D

> +
>  static __always_inline int PageMappingFlags(struct page *page)
>  {
>       return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
> -- 
> 2.35.1
> 
> 
> 

Reply via email to