From: Ira Weiny <ira.we...@intel.com>

Now that there is a mechanism for users to safely take LONGTERM pins on
FS DAX pages, remove the FS DAX exclusion from GUP with FOLL_LONGTERM.

Special handling of FOLL_LONGTERM pins remains in effect when CONFIG_CMA is enabled.

Signed-off-by: Ira Weiny <ira.we...@intel.com>
---
 mm/gup.c | 78 ++++++--------------------------------------------------
 1 file changed, 8 insertions(+), 70 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index d06cc5b14c0b..4f6e5606b81e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1392,26 +1392,6 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 
-#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
-static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
-{
-       long i;
-       struct vm_area_struct *vma_prev = NULL;
-
-       for (i = 0; i < nr_pages; i++) {
-               struct vm_area_struct *vma = vmas[i];
-
-               if (vma == vma_prev)
-                       continue;
-
-               vma_prev = vma;
-
-               if (vma_is_fsdax(vma))
-                       return true;
-       }
-       return false;
-}
-
 #ifdef CONFIG_CMA
 static struct page *new_non_cma_page(struct page *page, unsigned long private)
 {
@@ -1542,18 +1522,6 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 
        return nr_pages;
 }
-#else
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-                                       struct mm_struct *mm,
-                                       unsigned long start,
-                                       unsigned long nr_pages,
-                                       struct page **pages,
-                                       struct vm_area_struct **vmas,
-                                       unsigned int gup_flags)
-{
-       return nr_pages;
-}
-#endif
 
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1567,49 +1535,28 @@ static long __gup_longterm_locked(struct task_struct *tsk,
                                  struct vm_area_struct **vmas,
                                  unsigned int gup_flags)
 {
-       struct vm_area_struct **vmas_tmp = vmas;
        unsigned long flags = 0;
-       long rc, i;
+       long rc;
 
-       if (gup_flags & FOLL_LONGTERM) {
-               if (!pages)
-                       return -EINVAL;
-
-               if (!vmas_tmp) {
-                       vmas_tmp = kcalloc(nr_pages,
-                                          sizeof(struct vm_area_struct *),
-                                          GFP_KERNEL);
-                       if (!vmas_tmp)
-                               return -ENOMEM;
-               }
+       if (gup_flags & FOLL_LONGTERM)
                flags = memalloc_nocma_save();
-       }
 
        rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
-                                    vmas_tmp, NULL, gup_flags);
+                                    vmas, NULL, gup_flags);
 
        if (gup_flags & FOLL_LONGTERM) {
                memalloc_nocma_restore(flags);
                if (rc < 0)
                        goto out;
 
-               if (check_dax_vmas(vmas_tmp, rc)) {
-                       for (i = 0; i < rc; i++)
-                               put_page(pages[i]);
-                       rc = -EOPNOTSUPP;
-                       goto out;
-               }
-
                rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
-                                                vmas_tmp, gup_flags);
+                                                vmas, gup_flags);
        }
 
 out:
-       if (vmas_tmp != vmas)
-               kfree(vmas_tmp);
        return rc;
 }
-#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
+#else /* !CONFIG_CMA */
 static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
@@ -1621,7 +1568,7 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
        return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
                                       NULL, flags);
 }
-#endif /* CONFIG_FS_DAX || CONFIG_CMA */
+#endif /* CONFIG_CMA */
 
 /*
  * This is the same as get_user_pages_remote(), just with a
@@ -1882,9 +1829,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                        goto pte_unmap;
 
                if (pte_devmap(pte)) {
-                       if (unlikely(flags & FOLL_LONGTERM))
-                               goto pte_unmap;
-
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
@@ -2057,12 +2001,9 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
        if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
                return 0;
 
-       if (pmd_devmap(orig)) {
-               if (unlikely(flags & FOLL_LONGTERM))
-                       return 0;
+       if (pmd_devmap(orig))
                return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr,
                                             flags);
-       }
 
        refs = 0;
        page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -2101,12 +2042,9 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
        if (!pud_access_permitted(orig, flags & FOLL_WRITE))
                return 0;
 
-       if (pud_devmap(orig)) {
-               if (unlikely(flags & FOLL_LONGTERM))
-                       return 0;
+       if (pud_devmap(orig))
                return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr,
                                             flags);
-       }
 
        refs = 0;
        page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-- 
2.20.1

Reply via email to