From: Joonsoo Kim <iamjoonsoo....@lge.com>

To determine the correct node to migrate a page to during hotplug, we need
to check the node id of the page. A wrapper for alloc_migration_target()
exists for this purpose.

However, Vlastimil points out that all migration source pages come from
a single node. In this case, we don't need to check the node id for each
page, and we don't need to re-set the target nodemask for each page via
the wrapper. Set up the migration_target_control once and use it for all
pages.
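
For reference, the resulting flow in do_migrate_range() is sketched below
(a simplified illustration only; the authoritative change is in the hunk
further down). A single migration_target_control is built on the stack and
handed to migrate_pages(), which passes it unchanged to
alloc_migration_target() for every page on the isolation list:

    nodemask_t nmask = node_states[N_MEMORY];
    struct migration_target_control mtc = {
            .nmask = &nmask,
            .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
    };

    /* all source pages sit on one node, so the first page's nid suffices */
    mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));

    /* prefer another node, but reuse this one if no other node is online */
    node_clear(mtc.nid, nmask);
    if (nodes_empty(nmask))
            node_set(mtc.nid, nmask);

    ret = migrate_pages(&source, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);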

Acked-by: Vlastimil Babka <vba...@suse.cz>
Acked-by: Michal Hocko <mho...@suse.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 mm/memory_hotplug.c | 46 ++++++++++++++++++++++------------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 431b470f..7c216d6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1265,27 +1265,6 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
        return 0;
 }
 
-static struct page *new_node_page(struct page *page, unsigned long private)
-{
-       nodemask_t nmask = node_states[N_MEMORY];
-       struct migration_target_control mtc = {
-               .nid = page_to_nid(page),
-               .nmask = &nmask,
-               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
-       };
-
-       /*
-        * try to allocate from a different node but reuse this node if there
-        * are no other online nodes to be used (e.g. we are offlining a part
-        * of the only existing node)
-        */
-       node_clear(mtc.nid, nmask);
-       if (nodes_empty(nmask))
-               node_set(mtc.nid, nmask);
-
-       return alloc_migration_target(page, (unsigned long)&mtc);
-}
-
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -1345,9 +1324,28 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                put_page(page);
        }
        if (!list_empty(&source)) {
-               /* Allocate a new page from the nearest neighbor node */
-               ret = migrate_pages(&source, new_node_page, NULL, 0,
-                                       MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
+               nodemask_t nmask = node_states[N_MEMORY];
+               struct migration_target_control mtc = {
+                       .nmask = &nmask,
+                       .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+               };
+
+               /*
+                * We have checked that migration range is on a single zone so
+                * we can use the nid of the first page for all the others.
+                */
+               mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+
+               /*
+                * try to allocate from a different node but reuse this node
+                * if there are no other online nodes to be used (e.g. we are
+                * offlining a part of the only existing node)
+                */
+               node_clear(mtc.nid, nmask);
+               if (nodes_empty(nmask))
+                       node_set(mtc.nid, nmask);
+               ret = migrate_pages(&source, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
                if (ret) {
                        list_for_each_entry(page, &source, lru) {
                                pr_warn("migrating pfn %lx failed ret:%d ",
-- 
2.7.4
