From: Alexander Shishkin <alexander.shish...@linux.intel.com>

Register each CPU's entry trampoline page from the cpu entry area with
kcore as a KCORE_TEXT region, so that the entry trampoline code is
readable through /proc/kcore.

Signed-off-by: Alexander Shishkin <alexander.shish...@linux.intel.com>
---
 arch/x86/mm/cpu_entry_area.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index d1da5cf4b2de..fb1fbc8538fa 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -3,6 +3,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/kallsyms.h>
+#include <linux/kcore.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
@@ -14,6 +15,7 @@
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -147,6 +149,9 @@ static void __init setup_cpu_entry_area(int cpu)
 
        cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
                     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+       kclist_add(&per_cpu(kcore_entry_trampoline, cpu),
+                  &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE,
+                  KCORE_TEXT);
 #endif
        percpu_setup_debug_store(cpu);
 }
-- 
1.9.1
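
Not part of the patch, just for illustration: a minimal userspace sketch
(assuming a 64-bit kernel and root access to /proc/kcore) that lists the
PT_LOAD segments kcore advertises. After this change, the per-cpu
trampoline pages registered with kclist_add() above should appear among
them as additional text segments.

/* list_kcore_segments.c: dump the PT_LOAD segments of /proc/kcore */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	Elf64_Phdr *phdrs;
	int fd, i;

	fd = open("/proc/kcore", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/kcore");
		return 1;
	}

	/* /proc/kcore is an ELF core file; read its ELF header first */
	if (pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr)) {
		perror("read ELF header");
		return 1;
	}

	/* then read the program header table it points to */
	phdrs = calloc(ehdr.e_phnum, sizeof(*phdrs));
	if (!phdrs)
		return 1;
	if (pread(fd, phdrs, ehdr.e_phnum * sizeof(*phdrs), ehdr.e_phoff) !=
	    (ssize_t)(ehdr.e_phnum * sizeof(*phdrs))) {
		perror("read program headers");
		return 1;
	}

	/* each registered kcore_list entry shows up as a PT_LOAD segment */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (phdrs[i].p_type != PT_LOAD)
			continue;
		printf("segment %d: vaddr 0x%llx size 0x%llx\n", i,
		       (unsigned long long)phdrs[i].p_vaddr,
		       (unsigned long long)phdrs[i].p_memsz);
	}

	free(phdrs);
	close(fd);
	return 0;
}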
