From: Joonsoo Kim <iamjoonsoo....@lge.com>

There is a well-defined migration target allocation callback,
alloc_migration_target(). Use it instead of gup's open-coded
new_non_cma_page().

Acked-by: Vlastimil Babka <vba...@suse.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
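The replacement relies on the common migration allocation path: the caller
fills a struct migration_target_control and passes alloc_migration_target()
to migrate_pages(), and the callback picks the appropriate allocator
(hugetlb, THP or base page) for each source page. A minimal caller-side
sketch of that pattern, mirroring the hunks below (the control-structure
fields are as used here; the full definition lives in mm/internal.h and may
differ in detail):

	struct migration_target_control mtc = {
		/* NUMA_NO_NODE: allocate on each source page's own node */
		.nid = NUMA_NO_NODE,
		/* same gfp flags the old new_non_cma_page() started from */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
	};

	/* the private argument carries the control structure to the callback */
	if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
			  (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE))
		/* fall back, some pages failed migration */;
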
 mm/gup.c | 54 ++++++------------------------------------------------
 1 file changed, 6 insertions(+), 48 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 4ba822a..628ca4c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1608,52 +1608,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-       /*
-        * We want to make sure we allocate the new page from the same node
-        * as the source page.
-        */
-       int nid = page_to_nid(page);
-       /*
-        * Trying to allocate a page for migration. Ignore allocation
-        * failure warnings. We don't force __GFP_THISNODE here because
-        * this node here is the node where we have CMA reservation and
-        * in some case these nodes will have really less non CMA
-        * allocation memory.
-        *
-        * Note that CMA region is prohibited by allocation scope.
-        */
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
-
-       if (PageHighMem(page))
-               gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(page);
-
-               gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-               return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
-       }
-#endif
-       if (PageTransHuge(page)) {
-               struct page *thp;
-               /*
-                * ignore allocation failure warnings
-                */
-               gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-               thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
-
-       return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
                                        struct mm_struct *mm,
                                        unsigned long start,
@@ -1668,6 +1622,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
        bool migrate_allow = true;
        LIST_HEAD(cma_page_list);
        long ret = nr_pages;
+       struct migration_target_control mtc = {
+               .nid = NUMA_NO_NODE,
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+       };
 
 check_again:
        for (i = 0; i < nr_pages;) {
@@ -1713,8 +1671,8 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
                for (i = 0; i < nr_pages; i++)
                        put_page(pages[i]);
 
-               if (migrate_pages(&cma_page_list, new_non_cma_page,
-                                 NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
                        /*
                         * some of the pages failed migration. Do get_user_pages
                         * without migration.
-- 
2.7.4
