mhp_supports_memmap_on_memory is meant to be used by the caller prior
to hot-adding memory in order to figure out whether it can enable
MHP_MEMMAP_ON_MEMORY or not.

Enabling MHP_MEMMAP_ON_MEMORY requires:

 - CONFIG_SPARSEMEM_VMEMMAP
 - architecture support for altmap
 - hot-added range spans a single memory block

At the moment, only three architectures support passing altmap when
building the page tables: x86, powerpc and arm64.
Define an arch_support_memmap_on_memory function on those architectures
that returns true, and define a __weak variant of it that will be used
on the others.

Signed-off-by: Oscar Salvador <[email protected]>
---
 arch/arm64/mm/mmu.c   |  5 +++++
 arch/powerpc/mm/mem.c |  5 +++++
 arch/x86/mm/init_64.c |  5 +++++
 mm/memory_hotplug.c   | 24 ++++++++++++++++++++++++
 4 files changed, 39 insertions(+)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ca692a815731..0da4e4f8794f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1456,6 +1456,11 @@ static bool inside_linear_region(u64 start, u64 size)
               (start + size - 1) <= __pa(PAGE_END - 1);
 }
 
+bool arch_support_memmap_on_memory(void)
+{
+       return true;
+}
+
 int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3fc325bebe4d..18e7e28fe713 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -121,6 +121,11 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
        }
 }
 
+bool arch_support_memmap_on_memory(void)
+{
+       return true;
+}
+
 int __ref arch_add_memory(int nid, u64 start, u64 size,
                          struct mhp_params *params)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b5a3fa4033d3..ffb9d87c77e8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -860,6 +860,11 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
        return ret;
 }
 
+bool arch_support_memmap_on_memory(void)
+{
+       return true;
+}
+
 int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
 {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 87fbc2cc0d90..10255606ff85 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1028,6 +1028,20 @@ static int online_memory_block(struct memory_block *mem, void *arg)
        return device_online(&mem->dev);
 }
 
+bool __weak arch_support_memmap_on_memory(void)
+{
+       return false;
+}
+
+bool mhp_supports_memmap_on_memory(unsigned long size)
+{
+       if (!arch_support_memmap_on_memory() ||
+           !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ||
+           size > memory_block_size_bytes())
+               return false;
+       return true;
+}
+
 /*
  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
  * and online/offline operations (triggered e.g. by sysfs).
@@ -1064,6 +1078,16 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
                goto error;
        new_node = ret;
 
+       /*
+        * Return -EINVAL if caller specified MHP_MEMMAP_ON_MEMORY and we do
+        * not support it.
+        */
+       if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
+           !mhp_supports_memmap_on_memory(size)) {
+               ret = -EINVAL;
+               goto error;
+       }
+
        /*
         * Self hosted memmap array
         */
-- 
2.26.2

Reply via email to