From: Ira Weiny <ira.we...@intel.com>

Passing size to copy_user_dax() implies it can copy variable sizes of data,
when in fact it calls copy_user_page(), which copies exactly one page.
We are safe because the only caller uses PAGE_SIZE anyway, so just remove the
parameter for clarity.

While we are at it, rename copy_user_dax() to copy_cow_page_dax() to make it
clear that it is a single-purpose helper for this one case, not an
implementation of what dax_iomap_actor() does.

Reviewed-by: Ben Widawsky <ben.widaw...@intel.com>
Reviewed-by: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: Ira Weiny <ira.we...@intel.com>
---
 fs/dax.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 11b16729b86f..3e0babeb0365 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 	return __dax_invalidate_entry(mapping, index, false);
 }
 
-static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
-		sector_t sector, size_t size, struct page *to,
-		unsigned long vaddr)
+static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
+			     sector_t sector, struct page *to, unsigned long vaddr)
 {
 	void *vto, *kaddr;
 	pgoff_t pgoff;
 	long rc;
 	int id;
 
-	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 	if (rc)
 		return rc;
 
 	id = dax_read_lock();
-	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
 	if (rc < 0) {
 		dax_read_unlock(id);
 		return rc;
 	}
@@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			clear_user_highpage(vmf->cow_page, vaddr);
 			break;
 		case IOMAP_MAPPED:
-			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
-					sector, PAGE_SIZE, vmf->cow_page, vaddr);
+			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
+					sector, vmf->cow_page, vaddr);
 			break;
 		default:
 			WARN_ON_ONCE(1);
-- 
2.28.0.rc0.12.gb6a658bd00c9
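
For reference, a sketch of how the whole helper reads with this patch applied
is below. Only the lines visible in the first hunk come from the patch itself;
the tail after the dax_direct_access() error check (the kmap_atomic() /
copy_user_page() / kunmap_atomic() sequence and the final dax_read_unlock())
is reconstructed from the surrounding fs/dax.c context and should be treated
as an assumption rather than as part of the diff above.

static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
			     sector_t sector, struct page *to, unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	/* Always exactly one page: translate the sector to a page offset. */
	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	/* Get the kernel address of one page worth of the dax device. */
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}

	/*
	 * Reconstructed tail (assumed, not shown in the hunk above):
	 * copy_user_page() copies exactly one page from the dax mapping
	 * into the COW page, which is why no size argument is needed.
	 */
	vto = kmap_atomic(to);
	copy_user_page(vto, kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

The only caller, in the COW path of dax_iomap_pte_fault() (second hunk), was
already passing PAGE_SIZE, so dropping the size parameter changes no behaviour.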