Provide a way for a caller external to the NUMA code to ensure that
regions on the memblock reserved list do not cross node boundaries and
have a node ID assigned to them.  This will be used by PKRAM to ensure
that the initialization of page structs for preserved pages can be
deferred and multithreaded efficiently.
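
For example, a hypothetical external caller might use the new hook as
sketched below (pkram_reserve() and the surrounding init flow are
illustrative assumptions, not part of this patch; only
numa_isolate_memblocks() is introduced here):

    #include <linux/init.h>
    #include <asm/numa.h>

    void __init pkram_reserve(void)     /* hypothetical entry point */
    {
            /*
             * Make every region in memblock.reserved lie within a
             * single node and carry a node ID, so that per-node
             * threads can later initialize the page structs of
             * preserved pages in a deferred fashion.
             */
            numa_isolate_memblocks();

            /* ... reserve preserved ranges, defer struct page init ... */
    }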

Signed-off-by: Anthony Yznaga <anthony.yzn...@oracle.com>
---
 arch/x86/include/asm/numa.h |  4 ++++
 arch/x86/mm/numa.c          | 32 ++++++++++++++++++++------------
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index e3bae2b60a0d..632b5b6d8cb3 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -41,6 +41,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
 }
 
 extern int numa_cpu_node(int cpu);
+extern void __init numa_isolate_memblocks(void);
 
 #else  /* CONFIG_NUMA */
 static inline void set_apicid_to_node(int apicid, s16 node)
@@ -51,6 +52,9 @@ static inline int numa_cpu_node(int cpu)
 {
        return NUMA_NO_NODE;
 }
+static inline void numa_isolate_memblocks(void)
+{
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 5eb4dc2b97da..dd85098f9d72 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -473,6 +473,25 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
        return true;
 }
 
+void __init numa_isolate_memblocks(void)
+{
+       int i;
+
+       /*
+        * Iterate over all memory known to the x86 architecture,
+        * and use those ranges to set the nid in memblock.reserved.
+        * This will split up the memblock regions along node
+        * boundaries and will set the node IDs as well.
+        */
+       for (i = 0; i < numa_meminfo.nr_blks; i++) {
+               struct numa_memblk *mb = numa_meminfo.blk + i;
+               int ret;
+
+               ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
+               WARN_ON_ONCE(ret);
+       }
+}
+
 /*
  * Mark all currently memblock-reserved physical memory (which covers the
  * kernel's own memory ranges) as hot-unswappable.
@@ -491,19 +510,8 @@ static void __init numa_clear_kernel_node_hotplug(void)
         * used by the kernel, but those regions are not split up
         * along node boundaries yet, and don't necessarily have their
         * node ID set yet either.
-        *
-        * So iterate over all memory known to the x86 architecture,
-        * and use those ranges to set the nid in memblock.reserved.
-        * This will split up the memblock regions along node
-        * boundaries and will set the node IDs as well.
         */
-       for (i = 0; i < numa_meminfo.nr_blks; i++) {
-               struct numa_memblk *mb = numa_meminfo.blk + i;
-               int ret;
-
-               ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
-               WARN_ON_ONCE(ret);
-       }
+       numa_isolate_memblocks();
 
        /*
         * Now go over all reserved memblock regions, to construct a
-- 
1.8.3.1
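
As an aside, the splitting behavior that memblock_set_node() provides
can be illustrated with a small standalone sketch (plain userspace C;
the ranges, units, and names are made up purely for illustration):

    #include <stdio.h>

    struct range { unsigned long start, end; int nid; };

    int main(void)
    {
            /* One reserved region spanning two nodes (units: MB). */
            struct range reserved = { 3072, 5120, -1 };
            /* Node extents known to the arch (cf. numa_meminfo). */
            struct range nodes[2] = { { 0, 4096, 0 }, { 4096, 8192, 1 } };
            int i;

            for (i = 0; i < 2; i++) {
                    unsigned long s = reserved.start > nodes[i].start ?
                                      reserved.start : nodes[i].start;
                    unsigned long e = reserved.end < nodes[i].end ?
                                      reserved.end : nodes[i].end;

                    /*
                     * Each overlapping piece becomes its own region
                     * carrying the node's ID, so no region crosses a
                     * node boundary.
                     */
                    if (s < e)
                            printf("reserved [%lu, %lu) -> nid %d\n",
                                   s, e, nodes[i].nid);
            }
            return 0;
    }

Running it prints the single reserved range split at the 4096 MB node
boundary into two per-node pieces, which is what the loop over
numa_meminfo accomplishes for memblock.reserved.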
