The type of the VMLINUX_LOAD_ADDRESS macro is (unsigned long long) in the
32-bit kernel but (unsigned long) in the 64-bit kernel. Although there is
no error here, avoid using it to calculate kaslr_offset. What we need here
is the address of __kaslr_offset rather than (void *)offset.
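
For reference, a minimal user-space sketch (not the kernel code itself;
the addresses and variable names below are hypothetical) of the width
difference the macro's type introduces on a 32-bit build:

  #include <stdio.h>

  int main(void)
  {
          /* 32-bit kernel: the load-address constant has type unsigned long long */
          unsigned long long load_addr = 0x80100000ULL;
          /* hypothetical relocated _text address */
          unsigned long text = 0x80500000UL;

          /* the subtraction is promoted to unsigned long long (8 bytes) ... */
          printf("offset = %llu bytes (computed in %zu-byte arithmetic)\n",
                 text - load_addr, sizeof(text - load_addr));

          /* ... and is only narrowed back when cast for a %pK-style print */
          printf("offset as pointer: %p\n",
                 (void *)(unsigned long)(text - load_addr));
          return 0;
  }

On a 64-bit kernel both operands are unsigned long, so the arithmetic stays
in the native word size; using __kaslr_offset sidesteps the difference.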

Signed-off-by: Jinyang He <hejiny...@loongson.cn>
---
 arch/mips/kernel/relocate.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 95abb9c..52018a3 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -430,13 +430,9 @@ void *__init relocate_kernel(void)
  */
 static void show_kernel_relocation(const char *level)
 {
-       unsigned long offset;
-
-       offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
-
-       if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+       if (__kaslr_offset > 0) {
                printk(level);
-               pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+               pr_cont("Kernel relocated by 0x%pK\n", &__kaslr_offset);
                pr_cont(" .text @ 0x%pK\n", _text);
                pr_cont(" .data @ 0x%pK\n", _sdata);
                pr_cont(" .bss  @ 0x%pK\n", __bss_start);
-- 
2.1.0
