From: Zhen Lei <thunder.leiz...@huawei.com>

Use page-level mappings for crashkernel region so that we can use
set_memory_valid() to do access protection for it.

Signed-off-by: Zhen Lei <thunder.leiz...@huawei.com>
---
 arch/arm64/mm/mmu.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 95d360805f8aeb3..e0a197ebe14837d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -594,6 +594,11 @@ static void __init map_mem(pgd_t *pgdp)
         */
        memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
 
+#ifdef CONFIG_KEXEC_CORE
+       if (crashk_res.end)
+               memblock_mark_nomap(crashk_res.start, 
resource_size(&crashk_res));
+#endif
+
        /* map all the memory banks */
        for_each_mem_range(i, &start, &end) {
                if (start >= end)
@@ -621,6 +626,22 @@ static void __init map_mem(pgd_t *pgdp)
                       PAGE_KERNEL, NO_CONT_MAPPINGS);
        memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
        arm64_kfence_map_pool(early_kfence_pool, pgdp);
+
+       /*
+        * Use page-level mappings here so that we can shrink the region
+        * at page granularity and return unused memory to the buddy
+        * allocator through the /sys/kernel/kexec_crash_size interface.
+        */
+#ifdef CONFIG_KEXEC_CORE
+       if (crashk_res.end) {
+               __map_memblock(pgdp, crashk_res.start,
+                              crashk_res.end + 1,
+                              PAGE_KERNEL,
+                              NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+               memblock_clear_nomap(crashk_res.start,
+                                    resource_size(&crashk_res));
+       }
+#endif
 }
 
 void mark_rodata_ro(void)
-- 
2.25.1


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

Reply via email to