Re: [PATCH V7 08/18] x86/entry: Preserve PKRS MSR across exceptions

2021-12-02 Thread Andy Lutomirski

On 11/12/21 16:50, Ira Weiny wrote:

On Tue, Aug 03, 2021 at 09:32:21PM -0700, 'Ira Weiny' wrote:

From: Ira Weiny 

The PKRS MSR is not managed by XSAVE.  It is preserved through a context
switch but this support leaves exception handling code open to memory
accesses during exceptions.

2 possible places for preserving this state were considered,
irqentry_state_t or pt_regs.[1]  pt_regs was much more complicated and
was potentially fraught with unintended consequences.[2]  However, Andy
came up with a way to hide additional values on the stack which could be
accessed as "extended_pt_regs".[3]


Andy,

I'm preparing to send V8 of this PKS work.  But I have not seen any feedback
since I originally implemented this in V4[1].

Does this meet your expectations?  Are there any issues you can see with this
code?


I think I'm generally okay with the approach to allocating space.  All 
of Thomas' comments still apply, though.  (Sorry, I'm horribly behind.)




[PATCH v8 7/9] dax: add dax holder helper for filesystems

2021-12-02 Thread Shiyang Ruan
Add these helper functions, and export them for filesystem use.

Signed-off-by: Shiyang Ruan 
---
 drivers/dax/super.c | 19 +++
 include/linux/dax.h | 15 +++
 2 files changed, 34 insertions(+)

diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index a19fcc0a54f3..acbe7078ce4c 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -114,6 +114,25 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device 
*bdev, u64 *start_off)
return dax_dev;
 }
 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+
+void fs_dax_register_holder(struct dax_device *dax_dev, void *holder,
+   const struct dax_holder_operations *ops)
+{
+   dax_set_holder(dax_dev, holder, ops);
+}
+EXPORT_SYMBOL_GPL(fs_dax_register_holder);
+
+void fs_dax_unregister_holder(struct dax_device *dax_dev)
+{
+   dax_set_holder(dax_dev, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(fs_dax_unregister_holder);
+
+void *fs_dax_get_holder(struct dax_device *dax_dev)
+{
+   return dax_get_holder(dax_dev);
+}
+EXPORT_SYMBOL_GPL(fs_dax_get_holder);
 #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
 
 enum dax_device_flags {
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 500d048d444e..15a0ad4c248d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -140,6 +140,10 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
 {
put_dax(dax_dev);
 }
+void fs_dax_register_holder(struct dax_device *dax_dev, void *holder,
+   const struct dax_holder_operations *ops);
+void fs_dax_unregister_holder(struct dax_device *dax_dev);
+void *fs_dax_get_holder(struct dax_device *dax_dev);
 #else
 static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk 
*disk)
 {
@@ -156,6 +160,17 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct 
block_device *bdev,
 static inline void fs_put_dax(struct dax_device *dax_dev)
 {
 }
+static inline void fs_dax_register_holder(struct dax_device *dax_dev,
+   void *holder, const struct dax_holder_operations *ops)
+{
+}
+static inline void fs_dax_unregister_holder(struct dax_device *dax_dev)
+{
+}
+static inline void *fs_dax_get_holder(struct dax_device *dax_dev)
+{
+   return NULL;
+}
 #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
 
 #if IS_ENABLED(CONFIG_FS_DAX)
-- 
2.34.0






[PATCH v8 6/9] mm: Introduce mf_dax_kill_procs() for fsdax case

2021-12-02 Thread Shiyang Ruan
This function is called at the end of the RMAP routine, i.e. the filesystem
recovery function, to collect and kill processes using a shared page of a
DAX file.  The difference from mf_generic_kill_procs() is that it accepts a
file's mapping and offset instead of a struct page, because different files'
mappings and offsets may share the same page in fsdax mode.  So, it is
called when filesystem RMAP results are found.

Signed-off-by: Shiyang Ruan 
---
 fs/dax.c| 10 --
 include/linux/dax.h |  9 +
 include/linux/mm.h  |  2 ++
 mm/memory-failure.c | 83 -
 4 files changed, 86 insertions(+), 18 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index b3c737aff9de..66366ba83ffc 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -853,16 +853,6 @@ static void *dax_insert_entry(struct xa_state *xas,
return entry;
 }
 
-static inline
-unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
-{
-   unsigned long address;
-
-   address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-   VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-   return address;
-}
-
 /* Walk all mappings of a given index of a file and writeprotect them */
 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
unsigned long pfn)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 7e75d2c45f78..500d048d444e 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -254,6 +254,15 @@ static inline bool dax_mapping(struct address_space 
*mapping)
 {
return mapping->host && IS_DAX(mapping->host);
 }
+static inline unsigned long pgoff_address(pgoff_t pgoff,
+   struct vm_area_struct *vma)
+{
+   unsigned long address;
+
+   address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+   VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+   return address;
+}
 
 #ifdef CONFIG_DEV_DAX_HMEM_DEVICES
 void hmem_register_device(int target_nid, struct resource *r);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7e4a9e7d807..8a48097d5fb8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3221,6 +3221,8 @@ enum mf_flags {
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
 };
+extern int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+unsigned long count, int mf_flags);
 extern int memory_failure(unsigned long pfn, int flags);
 extern void memory_failure_queue(unsigned long pfn, int flags);
 extern void memory_failure_queue_kick(int cpu);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3cc612b29f89..0daab29b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -303,10 +303,9 @@ void shake_page(struct page *p)
 }
 EXPORT_SYMBOL_GPL(shake_page);
 
-static unsigned long dev_pagemap_mapping_shift(struct page *page,
+static unsigned long dev_pagemap_mapping_shift(unsigned long address,
struct vm_area_struct *vma)
 {
-   unsigned long address = vma_address(page, vma);
unsigned long ret = 0;
pgd_t *pgd;
p4d_t *p4d;
@@ -346,7 +345,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page 
*page,
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  */
-static void add_to_kill(struct task_struct *tsk, struct page *p,
+static void add_to_kill(struct task_struct *tsk, struct page *p, pgoff_t pgoff,
   struct vm_area_struct *vma,
   struct list_head *to_kill)
 {
@@ -359,9 +358,15 @@ static void add_to_kill(struct task_struct *tsk, struct 
page *p,
}
 
tk->addr = page_address_in_vma(p, vma);
-   if (is_zone_device_page(p))
-   tk->size_shift = dev_pagemap_mapping_shift(p, vma);
-   else
+   if (is_zone_device_page(p)) {
+   /*
+* Since page->mapping is no more used for fsdax, we should
+* calculate the address in a fsdax way.
+*/
+   if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+   tk->addr = pgoff_address(pgoff, vma);
+   tk->size_shift = dev_pagemap_mapping_shift(tk->addr, vma);
+   } else
tk->size_shift = page_shift(compound_head(p));
 
/*
@@ -509,7 +514,7 @@ static void collect_procs_anon(struct page *page, struct 
list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
-   add_to_kill(t, page, vma, to_kill);
+   add_to_kill(t, page, 0, vma, to_kill);
}
}
read_unlock(_lock);
@@ -545,7 +550,32 @@ static void collect_procs_file(struct page *page, struct 
list_head *to_kill,
 * to be informed of all such data 

[PATCH v8 4/9] pagemap,pmem: Introduce ->memory_failure()

2021-12-02 Thread Shiyang Ruan
When a memory failure occurs, we call this function, which is implemented
by each kind of device.  For the fsdax case, the pmem device driver
implements it.  The pmem device driver will find out the filesystem in
which the corrupted page is located.

With dax_holder notify support, we are able to notify the memory failure
from the pmem driver to upper layers.  If something is not supported in
the notify routine, memory_failure() will fall back to the generic handler.

Signed-off-by: Shiyang Ruan 
Reviewed-by: Christoph Hellwig 
---
 drivers/nvdimm/pmem.c| 16 
 include/linux/memremap.h |  9 +
 mm/memory-failure.c  | 14 ++
 3 files changed, 39 insertions(+)

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4190c8c46ca8..2114554358eb 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -386,6 +386,20 @@ static void pmem_release_disk(void *__pmem)
blk_cleanup_disk(pmem->disk);
 }
 
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+   unsigned long pfn, u64 len, int mf_flags)
+{
+   struct pmem_device *pmem =
+   container_of(pgmap, struct pmem_device, pgmap);
+   loff_t offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+
+   return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+   .memory_failure = pmem_pagemap_memory_failure,
+};
+
 static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
 {
@@ -448,6 +462,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+   pmem->pgmap.ops = _pagemap_ops;
addr = devm_memremap_pages(dev, >pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -461,6 +476,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+   pmem->pgmap.ops = _pagemap_ops;
addr = devm_memremap_pages(dev, >pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index c0e9d35889e8..820c2f33b163 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -87,6 +87,15 @@ struct dev_pagemap_ops {
 * the page back to a CPU accessible page.
 */
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+   /*
+* Handle the memory failure happens on a range of pfns.  Notify the
+* processes who are using these pfns, and try to recover the data on
+* them if necessary.  The mf_flags is finally passed to the recover
+* function through the whole notify routine.
+*/
+   int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ u64 len, int mf_flags);
 };
 
 #define PGMAP_ALTMAP_VALID (1 << 0)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ee7d626fed7..3cc612b29f89 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1625,6 +1625,20 @@ static int memory_failure_dev_pagemap(unsigned long pfn, 
int flags,
if (!pgmap_pfn_valid(pgmap, pfn))
goto out;
 
+   /*
+* Call driver's implementation to handle the memory failure, otherwise
+* fall back to generic handler.
+*/
+   if (pgmap->ops->memory_failure) {
+   rc = pgmap->ops->memory_failure(pgmap, pfn, PAGE_SIZE, flags);
+   /*
+* Fall back to generic handler too if operation is not
+* supported inside the driver/device/filesystem.
+*/
+   if (rc != -EOPNOTSUPP)
+   goto out;
+   }
+
rc = mf_generic_kill_procs(pfn, flags, pgmap);
 out:
/* drop pgmap ref acquired in caller */
-- 
2.34.0






[PATCH v8 3/9] mm: factor helpers for memory_failure_dev_pagemap

2021-12-02 Thread Shiyang Ruan
The memory_failure_dev_pagemap code is a bit complex before introducing the
RMAP feature for fsdax.  So it is necessary to factor out some helper
functions to simplify this code.

Signed-off-by: Shiyang Ruan 
Reviewed-by: Darrick J. Wong 
Reviewed-by: Christoph Hellwig 
---
 mm/memory-failure.c | 141 
 1 file changed, 77 insertions(+), 64 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 07c875fdeaf0..1ee7d626fed7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1449,6 +1449,80 @@ static int try_to_split_thp_page(struct page *page, 
const char *msg)
return 0;
 }
 
+static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+   struct address_space *mapping, pgoff_t index, int flags)
+{
+   struct to_kill *tk;
+   unsigned long size = 0;
+
+   list_for_each_entry(tk, to_kill, nd)
+   if (tk->size_shift)
+   size = max(size, 1UL << tk->size_shift);
+
+   if (size) {
+   /*
+* Unmap the largest mapping to avoid breaking up device-dax
+* mappings which are constant size. The actual size of the
+* mapping being torn down is communicated in siginfo, see
+* kill_proc()
+*/
+   loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
+
+   unmap_mapping_range(mapping, start, size, 0);
+   }
+
+   kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
+}
+
+static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+   struct dev_pagemap *pgmap)
+{
+   struct page *page = pfn_to_page(pfn);
+   LIST_HEAD(to_kill);
+   dax_entry_t cookie;
+
+   /*
+* Prevent the inode from being freed while we are interrogating
+* the address_space, typically this would be handled by
+* lock_page(), but dax pages do not use the page lock. This
+* also prevents changes to the mapping of this pfn until
+* poison signaling is complete.
+*/
+   cookie = dax_lock_page(page);
+   if (!cookie)
+   return -EBUSY;
+
+   if (hwpoison_filter(page))
+   return 0;
+
+   if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+   /*
+* TODO: Handle HMM pages which may need coordination
+* with device-side memory.
+*/
+   return -EBUSY;
+   }
+
+   /*
+* Use this flag as an indication that the dax page has been
+* remapped UC to prevent speculative consumption of poison.
+*/
+   SetPageHWPoison(page);
+
+   /*
+* Unlike System-RAM there is no possibility to swap in a
+* different physical page at a given virtual address, so all
+* userspace consumption of ZONE_DEVICE memory necessitates
+* SIGBUS (i.e. MF_MUST_KILL)
+*/
+   flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+   collect_procs(page, _kill, true);
+
+   unmap_and_kill(_kill, pfn, page->mapping, page->index, flags);
+   dax_unlock_page(page, cookie);
+   return 0;
+}
+
 static int memory_failure_hugetlb(unsigned long pfn, int flags)
 {
struct page *p = pfn_to_page(pfn);
@@ -1538,12 +1612,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, 
int flags,
struct dev_pagemap *pgmap)
 {
struct page *page = pfn_to_page(pfn);
-   unsigned long size = 0;
-   struct to_kill *tk;
LIST_HEAD(tokill);
-   int rc = -EBUSY;
-   loff_t start;
-   dax_entry_t cookie;
+   int rc = -ENXIO;
 
if (flags & MF_COUNT_INCREASED)
/*
@@ -1552,67 +1622,10 @@ static int memory_failure_dev_pagemap(unsigned long 
pfn, int flags,
put_page(page);
 
/* device metadata space is not recoverable */
-   if (!pgmap_pfn_valid(pgmap, pfn)) {
-   rc = -ENXIO;
-   goto out;
-   }
-
-   /*
-* Prevent the inode from being freed while we are interrogating
-* the address_space, typically this would be handled by
-* lock_page(), but dax pages do not use the page lock. This
-* also prevents changes to the mapping of this pfn until
-* poison signaling is complete.
-*/
-   cookie = dax_lock_page(page);
-   if (!cookie)
+   if (!pgmap_pfn_valid(pgmap, pfn))
goto out;
 
-   if (hwpoison_filter(page)) {
-   rc = 0;
-   goto unlock;
-   }
-
-   if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-   /*
-* TODO: Handle HMM pages which may need coordination
-* with device-side memory.
-*/
-   goto unlock;
-   }
-
-   /*
-* Use this flag as an indication that the dax page has been
-* remapped UC to prevent speculative consumption of poison.
-

[PATCH v8 1/9] dax: Use percpu rwsem for dax_{read,write}_lock()

2021-12-02 Thread Shiyang Ruan
In order to introduce dax holder registration, we need a write lock for
dax.  Change the current lock to percpu_rw_semaphore and introduce a
write lock for registration.

Signed-off-by: Shiyang Ruan 
---
 drivers/dax/device.c   | 11 +--
 drivers/dax/super.c| 40 +-
 drivers/md/dm-writecache.c |  7 +++
 fs/dax.c   | 31 ++---
 fs/fuse/dax.c  |  6 +++---
 include/linux/dax.h|  9 -
 6 files changed, 57 insertions(+), 47 deletions(-)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index dd8222a42808..041345f9956d 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -198,7 +198,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
struct file *filp = vmf->vma->vm_file;
unsigned long fault_size;
vm_fault_t rc = VM_FAULT_SIGBUS;
-   int id;
pfn_t pfn;
struct dev_dax *dev_dax = filp->private_data;
 
@@ -206,7 +205,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
 
-   id = dax_read_lock();
+   dax_read_lock(dev_dax->dax_dev);
switch (pe_size) {
case PE_SIZE_PTE:
fault_size = PAGE_SIZE;
@@ -246,7 +245,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
page->index = pgoff + i;
}
}
-   dax_read_unlock(id);
+   dax_read_unlock(dev_dax->dax_dev);
 
return rc;
 }
@@ -284,7 +283,7 @@ static const struct vm_operations_struct dax_vm_ops = {
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 {
struct dev_dax *dev_dax = filp->private_data;
-   int rc, id;
+   int rc;
 
dev_dbg(_dax->dev, "trace\n");
 
@@ -292,9 +291,9 @@ static int dax_mmap(struct file *filp, struct 
vm_area_struct *vma)
 * We lock to check dax_dev liveness and will re-check at
 * fault time.
 */
-   id = dax_read_lock();
+   dax_read_lock(dev_dax->dax_dev);
rc = check_vma(dev_dax, vma, __func__);
-   dax_read_unlock(id);
+   dax_read_unlock(dev_dax->dax_dev);
if (rc)
return rc;
 
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e7152a6c4cc4..719e77b2c2d4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -26,29 +26,39 @@ struct dax_device {
struct inode inode;
struct cdev cdev;
void *private;
+   struct percpu_rw_semaphore rwsem;
unsigned long flags;
const struct dax_operations *ops;
 };
 
 static dev_t dax_devt;
-DEFINE_STATIC_SRCU(dax_srcu);
 static struct vfsmount *dax_mnt;
 static DEFINE_IDA(dax_minor_ida);
 static struct kmem_cache *dax_cache __read_mostly;
 static struct super_block *dax_superblock __read_mostly;
 
-int dax_read_lock(void)
+void dax_read_lock(struct dax_device *dax_dev)
 {
-   return srcu_read_lock(_srcu);
+   percpu_down_read(_dev->rwsem);
 }
 EXPORT_SYMBOL_GPL(dax_read_lock);
 
-void dax_read_unlock(int id)
+void dax_read_unlock(struct dax_device *dax_dev)
 {
-   srcu_read_unlock(_srcu, id);
+   percpu_up_read(_dev->rwsem);
 }
 EXPORT_SYMBOL_GPL(dax_read_unlock);
 
+void dax_write_lock(struct dax_device *dax_dev)
+{
+   percpu_down_write(_dev->rwsem);
+}
+
+void dax_write_unlock(struct dax_device *dax_dev)
+{
+   percpu_up_write(_dev->rwsem);
+}
+
 #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
 #include 
 
@@ -75,7 +85,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device 
*bdev, u64 *start_off)
 {
struct dax_device *dax_dev;
u64 part_size;
-   int id;
+   bool not_found;
 
if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
@@ -87,11 +97,14 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device 
*bdev, u64 *start_off)
return NULL;
}
 
-   id = dax_read_lock();
dax_dev = xa_load(_hosts, (unsigned long)bdev->bd_disk);
-   if (!dax_dev || !dax_alive(dax_dev) || !igrab(_dev->inode))
-   dax_dev = NULL;
-   dax_read_unlock(id);
+   if (dax_dev) {
+   dax_read_lock(dax_dev);
+   not_found = !dax_alive(dax_dev) || !igrab(_dev->inode);
+   dax_read_unlock(dax_dev);
+   if (not_found)
+   dax_dev = NULL;
+   }
 
return dax_dev;
 }
@@ -222,7 +235,7 @@ EXPORT_SYMBOL_GPL(__set_dax_synchronous);
 
 bool dax_alive(struct dax_device *dax_dev)
 {
-   lockdep_assert_held(_srcu);
+   lockdep_assert_held(_dev->rwsem);
return test_bit(DAXDEV_ALIVE, _dev->flags);
 }
 EXPORT_SYMBOL_GPL(dax_alive);
@@ -237,9 +250,9 @@ void kill_dax(struct dax_device *dax_dev)
 {
if (!dax_dev)
return;
-
+   dax_write_lock(dax_dev);

[RESEND PATCH v8 0/9] fsdax: introduce fs query to support reflink

2021-12-02 Thread Shiyang Ruan
This patchset is aimed to support shared pages tracking for fsdax.

Christoph has posted "decouple DAX from block devices v2", so I need to
rebase onto his tree.  And since the v8 patchset I sent before hasn't been
reviewed yet, I am sending this patchset as a RESEND of v8.

Changes from V8:
  - Rebased to "decouple DAX from block devices v2"
  - Patch8(implementation in XFS): Separate dax part to Patch7
  - Patch9: add FS_DAX_MAPPING_COW flag to distinguish CoW with normal

Changes from V7:
  - Change dax lock from global rwsem to per-device percpu_rwsem
  - Change type of range length from size_t to u64
  - Rename 'flags' to 'mf_flags'
  - Fix mistakes in XFS code
  - Add CoW branch for dax_associate_entry()

This patchset moves owner tracking from dax_associate_entry() to the pmem
device driver, by introducing an interface ->memory_failure() for struct
dev_pagemap.  This interface is called by memory_failure() in mm, and
implemented by the pmem device.

Then call holder operations to find the filesystem which the corrupted
data located in, and call filesystem handler to track files or metadata
associated with this page.

Finally we are able to try to fix the corrupted data in filesystem and
do other necessary processing, such as killing processes who are using
the files affected.

The call trace is like this:
memory_failure()
|* fsdax case
|
|pgmap->ops->memory_failure()  => pmem_pgmap_memory_failure()
| dax_holder_notify_failure()  =>
|  dax_device->holder_ops->notify_failure() =>
| - xfs_dax_notify_failure()
|  |* xfs_dax_notify_failure()
|  |--
|  |   xfs_rmap_query_range()
|  |xfs_dax_notify_failure_fn()
|  |* corrupted on metadata
|  |   try to recover data, call xfs_force_shutdown()
|  |* corrupted on file data
|  |   try to recover data, call mf_dax_kill_procs()
|* normal case
|-
|mf_generic_kill_procs()

==
Shiyang Ruan (9):
  dax: Use percpu rwsem for dax_{read,write}_lock()
  dax: Introduce holder for dax_device
  mm: factor helpers for memory_failure_dev_pagemap
  pagemap,pmem: Introduce ->memory_failure()
  fsdax: Introduce dax_lock_mapping_entry()
  mm: Introduce mf_dax_kill_procs() for fsdax case
  dax: add dax holder helper for filesystems
  xfs: Implement ->notify_failure() for XFS
  fsdax: set a CoW flag when associate reflink mappings

 drivers/dax/device.c|  11 +-
 drivers/dax/super.c | 120 ---
 drivers/md/dm-writecache.c  |   7 +-
 drivers/nvdimm/pmem.c   |  16 +++
 fs/dax.c| 172 +--
 fs/fuse/dax.c   |   6 +-
 fs/xfs/Makefile |   1 +
 fs/xfs/xfs_buf.c|   4 +
 fs/xfs/xfs_fsops.c  |   3 +
 fs/xfs/xfs_mount.h  |   1 +
 fs/xfs/xfs_notify_failure.c | 224 +++
 fs/xfs/xfs_notify_failure.h |  15 +++
 include/linux/dax.h |  73 +++-
 include/linux/memremap.h|   9 ++
 include/linux/mm.h  |   2 +
 mm/memory-failure.c | 226 +---
 16 files changed, 757 insertions(+), 133 deletions(-)
 create mode 100644 fs/xfs/xfs_notify_failure.c
 create mode 100644 fs/xfs/xfs_notify_failure.h

-- 
2.34.0






[PATCH v8 2/9] dax: Introduce holder for dax_device

2021-12-02 Thread Shiyang Ruan
To easily track the filesystem from a pmem device, we introduce a holder for
the dax_device structure, and also its operations.  This holder is used to
remember who is using this dax_device:
 - When it is the backend of a filesystem, the holder will be the
   superblock of this filesystem.
 - When this pmem device is one of the targets in a mapped device, the
   holder will be this mapped device.  In this case, the mapped device
   has its own dax_device and it will follow the first rule.  So that we
   can finally track to the filesystem we needed.

The holder and holder_ops will be set when a filesystem is being mounted,
or a target device is being activated.

Signed-off-by: Shiyang Ruan 
---
 drivers/dax/super.c | 61 +
 include/linux/dax.h | 25 +++
 2 files changed, 86 insertions(+)

diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 719e77b2c2d4..a19fcc0a54f3 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -20,15 +20,20 @@
  * @inode: core vfs
  * @cdev: optional character interface for "device dax"
  * @private: dax driver private data
+ * @holder_data: holder of a dax_device: could be filesystem or mapped device
  * @flags: state and boolean properties
+ * @ops: operations for dax_device
+ * @holder_ops: operations for the inner holder
  */
 struct dax_device {
struct inode inode;
struct cdev cdev;
void *private;
struct percpu_rw_semaphore rwsem;
+   void *holder_data;
unsigned long flags;
const struct dax_operations *ops;
+   const struct dax_holder_operations *holder_ops;
 };
 
 static dev_t dax_devt;
@@ -190,6 +195,29 @@ int dax_zero_page_range(struct dax_device *dax_dev, 
pgoff_t pgoff,
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
 
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
+ u64 len, int mf_flags)
+{
+   int rc;
+
+   dax_read_lock(dax_dev);
+   if (!dax_alive(dax_dev)) {
+   rc = -ENXIO;
+   goto out;
+   }
+
+   if (!dax_dev->holder_ops) {
+   rc = -EOPNOTSUPP;
+   goto out;
+   }
+
+   rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
+out:
+   dax_read_unlock(dax_dev);
+   return rc;
+}
+EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
+
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -252,6 +280,10 @@ void kill_dax(struct dax_device *dax_dev)
return;
dax_write_lock(dax_dev);
clear_bit(DAXDEV_ALIVE, _dev->flags);
+
+   /* clear holder data */
+   dax_dev->holder_ops = NULL;
+   dax_dev->holder_data = NULL;
dax_write_unlock(dax_dev);
 }
 EXPORT_SYMBOL_GPL(kill_dax);
@@ -399,6 +431,35 @@ void put_dax(struct dax_device *dax_dev)
 }
 EXPORT_SYMBOL_GPL(put_dax);
 
+void dax_set_holder(struct dax_device *dax_dev, void *holder,
+   const struct dax_holder_operations *ops)
+{
+   dax_write_lock(dax_dev);
+   if (!dax_alive(dax_dev))
+   goto out;
+
+   dax_dev->holder_data = holder;
+   dax_dev->holder_ops = ops;
+out:
+   dax_write_unlock(dax_dev);
+}
+EXPORT_SYMBOL_GPL(dax_set_holder);
+
+void *dax_get_holder(struct dax_device *dax_dev)
+{
+   void *holder = NULL;
+
+   dax_read_lock(dax_dev);
+   if (!dax_alive(dax_dev))
+   goto out;
+
+   holder = dax_dev->holder_data;
+out:
+   dax_read_unlock(dax_dev);
+   return holder;
+}
+EXPORT_SYMBOL_GPL(dax_get_holder);
+
 /**
  * inode_dax: convert a public inode into its dax_dev
  * @inode: An inode with i_cdev pointing to a dax_dev
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 8414a08dcbea..f01684a63447 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -44,6 +44,21 @@ struct dax_operations {
 #if IS_ENABLED(CONFIG_DAX)
 struct dax_device *alloc_dax(void *private, const struct dax_operations *ops,
unsigned long flags);
+struct dax_holder_operations {
+   /*
+* notify_failure - notify memory failure into inner holder device
+* @dax_dev: the dax device which contains the holder
+* @offset: offset on this dax device where memory failure occurs
+* @len: length of this memory failure event
+* @flags: action flags for memory failure handler
+*/
+   int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+   u64 len, int mf_flags);
+};
+
+void dax_set_holder(struct dax_device *dax_dev, void *holder,
+   const struct dax_holder_operations *ops);
+void *dax_get_holder(struct dax_device *dax_dev);
 void put_dax(struct dax_device *dax_dev);
 void kill_dax(struct dax_device *dax_dev);
 void dax_write_cache(struct dax_device *dax_dev, bool wc);
@@ -71,6 +86,14 @@ static inline bool daxdev_mapping_supported(struct 

[PATCH v8 8/9] xfs: Implement ->notify_failure() for XFS

2021-12-02 Thread Shiyang Ruan
Introduce xfs_notify_failure.c to handle failure-related work, such as
implementing ->notify_failure(), registering/unregistering the dax holder
in xfs, and so on.

If the rmap feature of XFS enabled, we can query it to find files and
metadata which are associated with the corrupt data.  For now all we do
is kill processes with that file mapped into their address spaces, but
future patches could actually do something about corrupt metadata.

After that, the memory failure needs to notify the processes who are
using those files.

Signed-off-by: Shiyang Ruan 
---
 fs/xfs/Makefile |   1 +
 fs/xfs/xfs_buf.c|   4 +
 fs/xfs/xfs_fsops.c  |   3 +
 fs/xfs/xfs_mount.h  |   1 +
 fs/xfs/xfs_notify_failure.c | 224 
 fs/xfs/xfs_notify_failure.h |  15 +++
 6 files changed, 248 insertions(+)
 create mode 100644 fs/xfs/xfs_notify_failure.c
 create mode 100644 fs/xfs/xfs_notify_failure.h

diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 04611a1068b4..389970b3e13b 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -84,6 +84,7 @@ xfs-y += xfs_aops.o \
   xfs_message.o \
   xfs_mount.o \
   xfs_mru_cache.o \
+  xfs_notify_failure.o \
   xfs_pwork.o \
   xfs_reflink.o \
   xfs_stats.o \
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index bbb0fbd34e64..40a8916cbbcb 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -19,6 +19,7 @@
 #include "xfs_errortag.h"
 #include "xfs_error.h"
 #include "xfs_ag.h"
+#include "xfs_notify_failure.h"
 
 static struct kmem_cache *xfs_buf_cache;
 
@@ -1892,6 +1893,7 @@ xfs_free_buftarg(
list_lru_destroy(>bt_lru);
 
blkdev_issue_flush(btp->bt_bdev);
+   xfs_notify_failure_unregister(btp->bt_daxdev);
fs_put_dax(btp->bt_daxdev);
 
kmem_free(btp);
@@ -1947,6 +1949,8 @@ xfs_alloc_buftarg(
btp->bt_bdev = bdev;
btp->bt_daxdev = fs_dax_get_by_bdev(bdev, >bt_dax_part_off);
 
+   xfs_notify_failure_register(mp, btp->bt_daxdev);
+
/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages
 * per 30 seconds so as to not spam logs too much on repeated errors.
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 33e26690a8c4..4c2d3d4ca5a5 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -542,6 +542,9 @@ xfs_do_force_shutdown(
} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
tag = XFS_PTAG_SHUTDOWN_CORRUPT;
why = "Corruption of in-memory data";
+   } else if (flags & SHUTDOWN_CORRUPT_META) {
+   tag = XFS_PTAG_SHUTDOWN_CORRUPT;
+   why = "Corruption of on-disk metadata";
} else {
tag = XFS_PTAG_SHUTDOWN_IOERROR;
why = "Metadata I/O Error";
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 00720a02e761..7812de2c00a7 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -435,6 +435,7 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, 
char *fname,
 #define SHUTDOWN_LOG_IO_ERROR  0x0002  /* write attempt to the log failed */
 #define SHUTDOWN_FORCE_UMOUNT  0x0004  /* shutdown from a forced unmount */
 #define SHUTDOWN_CORRUPT_INCORE0x0008  /* corrupt in-memory data 
structures */
+#define SHUTDOWN_CORRUPT_META  0x0010  /* corrupt metadata on device */
 
 #define XFS_SHUTDOWN_STRINGS \
{ SHUTDOWN_META_IO_ERROR,   "metadata_io" }, \
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
new file mode 100644
index ..0c868f89ca3e
--- /dev/null
+++ b/fs/xfs/xfs_notify_failure.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Fujitsu.  All Rights Reserved.
+ */
+
+#include "xfs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
+
+#include 
+#include 
+
+struct failure_info {
+   xfs_agblock_t   startblock;
+   xfs_filblks_t   blockcount;
+   int mf_flags;
+};
+
+static pgoff_t
+xfs_failure_pgoff(
+   struct xfs_mount*mp,
+   const struct xfs_rmap_irec  *rec,
+   const struct failure_info   *notify)
+{
+   uint64_t pos = rec->rm_offset;
+
+   if (notify->startblock > rec->rm_startblock)
+   pos += XFS_FSB_TO_B(mp,
+   notify->startblock - rec->rm_startblock);
+   return pos >> PAGE_SHIFT;
+}
+
+static unsigned long
+xfs_failure_pgcnt(
+   

[PATCH v8 5/9] fsdax: Introduce dax_lock_mapping_entry()

2021-12-02 Thread Shiyang Ruan
The current dax_lock_page() locks the dax entry by obtaining the mapping and
index from the page.  To support 1-to-N RMAP in NVDIMM, we need a new
function to lock a specific dax entry corresponding to this file's mapping
and index.  In addition, output the page corresponding to the specific dax
entry for the caller's use.

Signed-off-by: Shiyang Ruan 
---
 fs/dax.c| 65 -
 include/linux/dax.h | 15 +++
 2 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/fs/dax.c b/fs/dax.c
index 1f46810d4b68..b3c737aff9de 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -390,7 +390,7 @@ static struct page *dax_busy_page(void *entry)
 }
 
 /*
- * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
+ * dax_lock_page - Lock the DAX entry corresponding to a page
  * @page: The page whose entry we want to lock
  *
  * Context: Process context.
@@ -455,6 +455,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
	dax_unlock_entry(&xas, (void *)cookie);
 }
 
+/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
+ * @mapping: the file's mapping whose entry we want to lock
+ * @index: the offset within this file
+ * @page: output the dax page corresponding to this dax entry
+ *
+ * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
+ * could not be locked.
+ */
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t 
index,
+   struct page **page)
+{
+   XA_STATE(xas, NULL, 0);
+   void *entry;
+
+   rcu_read_lock();
+   for (;;) {
+   entry = NULL;
+   if (!dax_mapping(mapping))
+   break;
+
+   xas.xa = &mapping->i_pages;
+   xas_lock_irq(&xas);
+   xas_set(&xas, index);
+   entry = xas_load(&xas);
+   if (dax_is_locked(entry)) {
+   rcu_read_unlock();
+   wait_entry_unlocked(&xas, entry);
+   rcu_read_lock();
+   continue;
+   }
+   if (!entry ||
+   dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+   /*
+* Because we are looking for entry from file's mapping
+* and index, so the entry may not be inserted for now,
+* or even a zero/empty entry.  We don't think this is
+* an error case.  So, return a special value and do
+* not output @page.
+*/
+   entry = (void *)~0UL;
+   } else {
+   *page = pfn_to_page(dax_to_pfn(entry));
+   dax_lock_entry(&xas, entry);
+   }
+   xas_unlock_irq(&xas);
+   break;
+   }
+   rcu_read_unlock();
+   return (dax_entry_t)entry;
+}
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
+   dax_entry_t cookie)
+{
+   XA_STATE(xas, &mapping->i_pages, index);
+
+   if (cookie == ~0UL)
+   return;
+
+   dax_unlock_entry(&xas, (void *)cookie);
+}
+
 /*
  * Find page cache entry at given index. If it is a DAX entry, return it
  * with the entry locked. If the page cache doesn't contain an entry at
diff --git a/include/linux/dax.h b/include/linux/dax.h
index f01684a63447..7e75d2c45f78 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -166,6 +166,10 @@ struct page *dax_layout_busy_page(struct address_space 
*mapping);
 struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t 
start, loff_t end);
 dax_entry_t dax_lock_page(struct page *page);
 void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+   unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+   unsigned long index, dax_entry_t cookie);
 #else
 static inline struct page *dax_layout_busy_page(struct address_space *mapping)
 {
@@ -193,6 +197,17 @@ static inline dax_entry_t dax_lock_page(struct page *page)
 static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+   unsigned long index, struct page **page)
+{
+   return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+   unsigned long index, dax_entry_t cookie)
+{
+}
 #endif
 
 int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
-- 
2.34.0






[PATCH v8 9/9] fsdax: set a CoW flag when associate reflink mappings

2021-12-02 Thread Shiyang Ruan
Introduce a FS_DAX_MAPPING_COW flag to support association with CoW file
mappings.  In this case, the dax-RMAP already takes the responsibility
to look up for shared files by given dax page.  The page->mapping is no
longer to used for rmap but for marking that this dax page is shared.
And to make sure disassociation works fine, we use page->index as
refcount, and clear page->mapping to the initial state when page->index
is decreased to 0.

With the help of this new flag, it is able to distinguish normal case
and CoW case, and keep the warning in normal case.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
---
 fs/dax.c | 66 
 1 file changed, 57 insertions(+), 9 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 66366ba83ffc..18823f2c2385 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -335,12 +335,46 @@ static unsigned long dax_end_pfn(void *entry)
pfn < dax_end_pfn(entry); pfn++)
 
 /*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set FS_DAX_MAPPING_COW flag on the last bit of page->mapping to indicate 
that
+ * this is a reflink case.  In this case, we associate this page->mapping with
+ * file mapping at the first time and only once.
+ */
+#define FS_DAX_MAPPING_COW 1UL
+
+#define MAPPING_SET_COW(m) (m = (struct address_space *)FS_DAX_MAPPING_COW)
+#define MAPPING_TEST_COW(m)(((unsigned long)m & FS_DAX_MAPPING_COW) == \
+   FS_DAX_MAPPING_COW)
+
+/*
+ * Set or Update the page->mapping with FS_DAX_MAPPING_COW flag.
+ * Return true if it is an Update.
+ */
+static inline bool dax_mapping_set_cow(struct page *page)
+{
+   if (page->mapping) {
+   /* flag already set  */
+   if (MAPPING_TEST_COW(page->mapping))
+   return false;
+
+   /*
+* This page has been mapped even before it is shared, just
+* need to set this FS_DAX_MAPPING_COW flag.
+*/
+   MAPPING_SET_COW(page->mapping);
+   return true;
+   }
+   /* Newly associate CoW mapping */
+   MAPPING_SET_COW(page->mapping);
+   return false;
+}
+
+/*
+ * When it is called in dax_insert_entry(), the cow flag will indicate that
+ * whether this entry is shared by multiple files.  If so, set the 
page->mapping
+ * to be FS_DAX_MAPPING_COW, and use page->index as refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-   struct vm_area_struct *vma, unsigned long address)
+   struct vm_area_struct *vma, unsigned long address, bool cow)
 {
unsigned long size = dax_entry_size(entry), pfn, index;
int i = 0;
@@ -352,9 +386,17 @@ static void dax_associate_entry(void *entry, struct 
address_space *mapping,
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);
 
-   WARN_ON_ONCE(page->mapping);
-   page->mapping = mapping;
-   page->index = index + i++;
+   if (cow) {
+   if (dax_mapping_set_cow(page)) {
+   /* Was normal, now updated to CoW */
+   page->index = 2;
+   } else
+   page->index++;
+   } else {
+   WARN_ON_ONCE(page->mapping);
+   page->mapping = mapping;
+   page->index = index + i++;
+   }
}
 }
 
@@ -370,7 +412,12 @@ static void dax_disassociate_entry(void *entry, struct 
address_space *mapping,
struct page *page = pfn_to_page(pfn);
 
WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-   WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+   if (!MAPPING_TEST_COW(page->mapping)) {
+   /* keep the CoW flag if this page is still shared */
+   if (page->index-- > 0)
+   continue;
+   } else
+   WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page->mapping = NULL;
page->index = 0;
}
@@ -829,7 +876,8 @@ static void *dax_insert_entry(struct xa_state *xas,
void *old;
 
dax_disassociate_entry(entry, mapping, false);
-   dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+   dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+   false);
/*
 * Only swap our new entry into the page cache if the current
 * entry is a zero page or an empty entry.  If a normal PTE or
-- 
2.34.0