Add address output in dax_iomap_pfn() in order to perform a memcpy() in CoW
case.  Since this function both output address and pfn, rename it to
dax_iomap_direct_access().

dax_copy_edges() is a helper function that performs a copy from one part of
the device to another, for data that is not page aligned.

Signed-off-by: Goldwyn Rodrigues <rgold...@suse.com>
Signed-off-by: Shiyang Ruan <ruansy.f...@cn.fujitsu.com>
---
 fs/dax.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 58 insertions(+), 6 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 47380f75ef38..308678c58d4d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1043,8 +1043,8 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
        return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 }
 
-static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
-                        pfn_t *pfnp)
+static int dax_iomap_direct_access(struct iomap *iomap, loff_t pos, size_t size,
+                        pfn_t *pfnp, void **addr)
 {
        const sector_t sector = dax_iomap_sector(iomap, pos);
        pgoff_t pgoff;
@@ -1055,12 +1055,14 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
        if (rc)
                return rc;
        id = dax_read_lock();
-       length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
-                                  NULL, pfnp);
+       length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), addr,
+                                  pfnp);
        if (length < 0) {
                rc = length;
                goto out;
        }
+       if (!pfnp)
+               goto out_check_addr;
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
@@ -1070,11 +1072,59 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
        if (length > 1 && !pfn_t_devmap(*pfnp))
                goto out;
        rc = 0;
+
+out_check_addr:
+       if (!addr)
+               goto out;
+       if (!*addr)
+               rc = -EFAULT;
 out:
        dax_read_unlock(id);
        return rc;
 }
 
+/*
+ * dax_copy_edges - Copy the parts of the page not covered by the
+ *                 write but required for CoW, i.e. the head and/or
+ *                 tail bytes when pos/pos+length are not page
+ *                 aligned.  Returns 0 on success or a negative errno.
+ */
+static int dax_copy_edges(loff_t pos, loff_t length, struct iomap *srcmap,
+                         void *daddr, bool pmd)
+{
+       size_t page_size = pmd ? PMD_SIZE : PAGE_SIZE;
+       loff_t offset = pos & (page_size - 1);
+       size_t size = ALIGN(offset + length, page_size);
+       loff_t end = pos + length;
+       loff_t pg_end = round_up(end, page_size);
+       void *saddr = NULL;
+       int ret = 0;
+
+       ret = dax_iomap_direct_access(srcmap, pos, size, NULL, &saddr);
+       if (ret)
+               return ret;
+       /*
+        * Copy the head of the page: bytes [0, offset) precede the
+        * write and must come from the source, or zeros when the
+        * source extent is a hole (saddr == NULL).
+        */
+       if (offset) {
+               if (saddr)
+                       ret = memcpy_mcsafe(daddr, saddr, offset) ? -EIO : 0;
+               else
+                       memset(daddr, 0, offset);
+       }
+
+       /* Copy the tail: bytes [end, pg_end) follow the write. */
+       if (ret == 0 && end < pg_end) {
+               if (saddr)
+                       ret = memcpy_mcsafe(daddr + offset + length,
+                              saddr + offset + length, pg_end - end) ? -EIO : 0;
+               else
+                       memset(daddr + offset + length, 0,
+                                       pg_end - end);
+       }
+       return ret;
+}
+
 /*
  * The user has performed a load from a hole in the file.  Allocating a new
  * page in the file would cause excessive storage usage for workloads with
@@ -1394,7 +1444,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                        count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
-               error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
+               error = dax_iomap_direct_access(&iomap, pos, PAGE_SIZE, &pfn,
+                                               NULL);
                if (error < 0)
                        goto error_finish_iomap;
 
@@ -1612,7 +1663,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
        switch (iomap.type) {
        case IOMAP_MAPPED:
-               error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
+               error = dax_iomap_direct_access(&iomap, pos, PMD_SIZE, &pfn,
+                                               NULL);
                if (error < 0)
                        goto finish_iomap;
 
-- 
2.27.0



Reply via email to