From: Masayoshi Mizuma <m.miz...@jp.fujitsu.com>

If the physical memory layout reserves a large address range for memory
hotplug, the padding used for the physical memory mapping section is not
big enough. For example, with the following SRAT layout:
  SRAT: Node 6 PXM 4 [mem 0x100000000000-0x13ffffffffff] hotplug
  SRAT: Node 7 PXM 5 [mem 0x140000000000-0x17ffffffffff] hotplug
  SRAT: Node 2 PXM 6 [mem 0x180000000000-0x1bffffffffff] hotplug
  SRAT: Node 3 PXM 7 [mem 0x1c0000000000-0x1fffffffffff] hotplug

The padding can be increased via CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
however, the required padding size depends on the physical memory layout
defined by the SRAT, so a kernel boot option is more suitable than changing
the build-time config. Add a rand_mem_physical_padding= early parameter
that overrides the config default.
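
For illustration only (this scenario is not part of the patch): with the
layout above, the hotpluggable ranges extend up to 0x1fffffffffff, i.e.
32 TB of physical address space. Assuming a machine that boots with only
about 1 TB populated, the direct mapping would need roughly 31 TB of
extra headroom, which could be requested with:

  rand_mem_physical_padding=31

(the value is in TB, i.e. units of 1 << TB_SHIFT, matching memory_tb in
kernel_randomize_memory()).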

Signed-off-by: Masayoshi Mizuma <m.miz...@jp.fujitsu.com>
Reviewed-by: Baoquan He <b...@redhat.com>
---
 arch/x86/mm/kaslr.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 61db77b..00cf4ca 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -40,6 +40,7 @@
  */
 static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
 
+int __initdata rand_mem_physical_padding = CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
  * earlier during boot). The list is ordered based on virtual addresses. This
@@ -69,6 +70,20 @@ static inline bool kaslr_memory_enabled(void)
        return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
+static int __init rand_mem_physical_padding_setup(char *str)
+{
+       int max_padding = (1 << (MAX_PHYSMEM_BITS - TB_SHIFT)) - 1;
+
+       get_option(&str, &rand_mem_physical_padding);
+       if (rand_mem_physical_padding < 0)
+               rand_mem_physical_padding = 0;
+       else if (rand_mem_physical_padding > max_padding)
+               rand_mem_physical_padding = max_padding;
+
+       return 0;
+}
+early_param("rand_mem_physical_padding", rand_mem_physical_padding_setup);
+
 /* Initialize base and padding for each memory region randomized with KASLR */
 void __init kernel_randomize_memory(void)
 {
@@ -102,7 +117,7 @@ void __init kernel_randomize_memory(void)
         */
        BUG_ON(kaslr_regions[0].base != &page_offset_base);
        memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
-               CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+               rand_mem_physical_padding;
 
        /* Adapt phyiscal memory region size based on available memory */
        if (memory_tb < kaslr_regions[0].size_tb)
-- 
2.18.0
