Reserve memory from &_text to &_end. Otherwise, if the kernel load address
was modified, the memory range from start_pfn to kernel_start_pfn would be
reserved as well, and we could no longer use that range.

Signed-off-by: Jinyang He <hejiny...@loongson.cn>
---
 arch/mips/loongson64/numa.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 509b360..c6f0c48 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -151,6 +151,9 @@ static void __init node_mem_init(unsigned int node)
        NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
        if (node == 0) {
+               /* kernel start address */
+               unsigned long kernel_start_pfn = PFN_DOWN(__pa_symbol(&_text));
+
                /* kernel end address */
                unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
 
@@ -158,8 +161,8 @@ static void __init node_mem_init(unsigned int node)
                max_low_pfn = end_pfn;
 
                /* Reserve the kernel text/data/bss */
-               memblock_reserve(start_pfn << PAGE_SHIFT,
-                                ((kernel_end_pfn - start_pfn) << PAGE_SHIFT));
+               memblock_reserve(kernel_start_pfn << PAGE_SHIFT,
+                                ((kernel_end_pfn - kernel_start_pfn) << PAGE_SHIFT));
 
                /* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
                if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
-- 
2.1.0

Reply via email to