Currently, kexec relocation function (arm64_relocate_new_kernel) accepts
the following arguments:

head:           start of array that contains relocation information.
entry:          entry point for new kernel or purgatory.
dtb_mem:        first and only argument to entry.

The number of arguments cannot be easily expanded, because this
function is also called from HVC_SOFT_RESTART, which preserves only
three arguments. Also, arm64_relocate_new_kernel is written in
assembly and is called without a stack, so there is no place to spill
extra arguments into free registers.

Soon, we will need to pass more arguments: once we enable the MMU, we
will need to pass information about the page tables.

Pass kimage to arm64_relocate_new_kernel, and teach it to get the
required fields from kimage.

Suggested-by: James Morse <james.mo...@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatas...@soleen.com>
---
 arch/arm64/kernel/asm-offsets.c     |  7 +++++++
 arch/arm64/kernel/machine_kexec.c   |  6 ++++--
 arch/arm64/kernel/relocate_kernel.S | 10 ++++------
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a36e2fc330d4..0c92e193f866 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
 
 #include <linux/arm_sdei.h>
 #include <linux/sched.h>
+#include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
@@ -153,6 +154,12 @@ int main(void)
   DEFINE(PTRAUTH_USER_KEY_APGA,                offsetof(struct 
ptrauth_keys_user, apga));
   DEFINE(PTRAUTH_KERNEL_KEY_APIA,      offsetof(struct ptrauth_keys_kernel, 
apia));
   BLANK();
+#endif
+#ifdef CONFIG_KEXEC_CORE
+  DEFINE(KIMAGE_ARCH_DTB_MEM,          offsetof(struct kimage, arch.dtb_mem));
+  DEFINE(KIMAGE_HEAD,                  offsetof(struct kimage, head));
+  DEFINE(KIMAGE_START,                 offsetof(struct kimage, start));
+  BLANK();
 #endif
   return 0;
 }
diff --git a/arch/arm64/kernel/machine_kexec.c 
b/arch/arm64/kernel/machine_kexec.c
index b150b65f0b84..2e734e4ae12e 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -83,6 +83,8 @@ static void kexec_list_flush(struct kimage *kimage)
 {
        kimage_entry_t *entry;
 
+       __flush_dcache_area(kimage, sizeof(*kimage));
+
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
                void *addr;
@@ -198,8 +200,8 @@ void machine_kexec(struct kimage *kimage)
                restart(is_hyp_callable(), kimage->start, kimage->arch.dtb_mem,
                        0, 0);
        } else {
-               cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
-                                kimage->start, kimage->arch.dtb_mem);
+               cpu_soft_restart(kimage->arch.kern_reloc, virt_to_phys(kimage),
+                                0, 0);
        }
 
        BUG(); /* Should never get here. */
diff --git a/arch/arm64/kernel/relocate_kernel.S 
b/arch/arm64/kernel/relocate_kernel.S
index 718037bef560..36b4496524c3 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -27,9 +27,7 @@
  */
 SYM_CODE_START(arm64_relocate_new_kernel)
        /* Setup the list loop variables. */
-       mov     x18, x2                         /* x18 = dtb address */
-       mov     x17, x1                         /* x17 = kimage_start */
-       mov     x16, x0                         /* x16 = kimage_head */
+       ldr     x16, [x0, #KIMAGE_HEAD]         /* x16 = kimage_head */
        mov     x14, xzr                        /* x14 = entry ptr */
        mov     x13, xzr                        /* x13 = copy dest */
        raw_dcache_line_size x15, x1            /* x15 = dcache line size */
@@ -63,12 +61,12 @@ SYM_CODE_START(arm64_relocate_new_kernel)
        isb
 
        /* Start new image. */
-       mov     x0, x18
+       ldr     x4, [x0, #KIMAGE_START]         /* relocation start */
+       ldr     x0, [x0, #KIMAGE_ARCH_DTB_MEM]  /* dtb address */
        mov     x1, xzr
        mov     x2, xzr
        mov     x3, xzr
-       br      x17
-
+       br      x4
 SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3       /* To keep the 64-bit values below naturally aligned. */
-- 
2.25.1


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

Reply via email to