From: Joonsoo Kim <iamjoonsoo....@lge.com>

There is no reason to implement its own function for migration
target allocation. Use the standard one.
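
For reference, the standard helper is expected to cover the same three
cases (hugetlb, THP and base pages) that the removed function
open-coded. Below is a minimal sketch of that dispatch, reconstructed
purely from the logic deleted in this patch and this series'
alloc_control convention; it is an illustration, not the actual
implementation:

	/*
	 * Sketch only: assumed shape of the common helper this patch
	 * switches to, pieced together from the open-coded logic
	 * removed below.
	 */
	static struct page *alloc_migration_target(struct page *page,
					struct alloc_control *ac)
	{
		/* hugetlb pages go through the hstate-aware allocator */
		if (PageHuge(page))
			return alloc_huge_page_nodemask(page_hstate(page), ac);

		if (PageTransHuge(page)) {
			/* Clear __GFP_MOVABLE so we don't allocate from CMA again. */
			struct page *thp = __alloc_pages_node(ac->nid,
				(GFP_TRANSHUGE | __GFP_NOWARN) & ~__GFP_MOVABLE,
				HPAGE_PMD_ORDER);

			if (!thp)
				return NULL;
			prep_transhuge_page(thp);
			return thp;
		}

		if (PageHighMem(page))
			ac->gfp_mask |= __GFP_HIGHMEM;

		return __alloc_pages_node(ac->nid, ac->gfp_mask, 0);
	}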

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/gup.c | 61 ++++++++++---------------------------------------------------
 1 file changed, 10 insertions(+), 51 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index a49d7ea..0e4214d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1593,58 +1593,16 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page,
+static struct page *alloc_migration_target_non_cma(struct page *page,
                                struct alloc_control *ac)
 {
-       /*
-        * We want to make sure we allocate the new page from the same node
-        * as the source page.
-        */
-       int nid = page_to_nid(page);
-       /*
-        * Trying to allocate a page for migration. Ignore allocation
-        * failure warnings. We don't force __GFP_THISNODE here because
-        * this node here is the node where we have CMA reservation and
-        * in some case these nodes will have really less non movable
-        * allocation memory.
-        */
-       gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
-
-       if (PageHighMem(page))
-               gfp_mask |= __GFP_HIGHMEM;
-
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(page);
-               struct alloc_control ac = {
-                       .nid = nid,
-                       .nmask = NULL,
-                       .gfp_mask = __GFP_NOWARN,
-                       .skip_cma = true,
-               };
-
-               return alloc_huge_page_nodemask(h, &ac);
-       }
-
-       if (PageTransHuge(page)) {
-               struct page *thp;
-               /*
-                * ignore allocation failure warnings
-                */
-               gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-               /*
-                * Remove the movable mask so that we don't allocate from
-                * CMA area again.
-                */
-               thp_gfpmask &= ~__GFP_MOVABLE;
-               thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
+       struct alloc_control __ac = {
+               .nid = page_to_nid(page),
+               .gfp_mask = GFP_USER | __GFP_NOWARN,
+               .skip_cma = true,
+       };
 
-       return __alloc_pages_node(nid, gfp_mask, 0);
+       return alloc_migration_target(page, &__ac);
 }
 
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
@@ -1706,8 +1664,9 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
                for (i = 0; i < nr_pages; i++)
                        put_page(pages[i]);
 
-               if (migrate_pages(&cma_page_list, new_non_cma_page,
-                                 NULL, NULL, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+               if (migrate_pages(&cma_page_list,
+                               alloc_migration_target_non_cma, NULL, NULL,
+                               MIGRATE_SYNC, MR_CONTIG_RANGE)) {
                        /*
                         * some of the pages failed migration. Do get_user_pages
                         * without migration.
-- 
2.7.4
