Linux has supported KASAN for the vmalloc area since commit 3c5c3cfb9ef4da9
("kasan: support backing vmalloc space with real shadow memory").

Like what is already done for MODULES_VADDR, do not early-populate the
shadow for the region between VMALLOC_START and VMALLOC_END; it is backed
with real shadow memory on demand. Note that the kernel image mapping now
lies within the vmalloc area, so its shadow must be kept populated.

Signed-off-by: Lecopzer Chen <lecopzer.c...@mediatek.com>
---
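Note for reviewers: below is a minimal userspace sketch (not part of the
patch) of the shadow-address arithmetic this change relies on, assuming
generic KASAN where one shadow byte covers 8 bytes of memory. The offset
and vmalloc bounds are made-up example values, not the kernel's configured
ones; only the arithmetic mirrors kasan_mem_to_shadow().

/* Illustrative only: mirrors the arithmetic of kasan_mem_to_shadow(). */
#include <stdio.h>

#define SHADOW_SCALE_SHIFT	3			/* 1 shadow byte covers 8 bytes */
#define SHADOW_OFFSET		0xdfff800000000000ULL	/* example value, not the real config */

static unsigned long long mem_to_shadow(unsigned long long addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	/* Example addresses standing in for VMALLOC_START/VMALLOC_END. */
	unsigned long long vmalloc_start = 0xffff800010000000ULL;
	unsigned long long vmalloc_end   = 0xfffffdffbffe0000ULL;

	/*
	 * With CONFIG_KASAN_VMALLOC, the shadow for this range is left
	 * unpopulated here and backed with real shadow pages on demand;
	 * only the range above the vmalloc shadow end gets the early
	 * (read-only zero) shadow.
	 */
	printf("vmalloc shadow: %#llx - %#llx\n",
	       mem_to_shadow(vmalloc_start), mem_to_shadow(vmalloc_end));
	return 0;
}
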
 arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d8e66c78440e..d7ad3f1e9c4d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
 {
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
+       u64 vmalloc_shadow_start, vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;
 
@@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
+       vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
+       vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
        /*
         * We are going to perform proper setup of shadow memory.
         * At first we should unmap early shadow (clear_pgds() call below).
@@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
 
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                   (void *)mod_shadow_start);
-       kasan_populate_early_shadow((void *)kimg_shadow_end,
-                                  (void *)KASAN_SHADOW_END);
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+               kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+                                          (void *)KASAN_SHADOW_END);
+               if (vmalloc_shadow_start > mod_shadow_end)
+                       kasan_populate_early_shadow((void *)mod_shadow_end,
+                                                   (void *)vmalloc_shadow_start);
+
+       } else {
+               kasan_populate_early_shadow((void *)kimg_shadow_end,
+                                          (void *)KASAN_SHADOW_END);
+               if (kimg_shadow_start > mod_shadow_end)
+                       kasan_populate_early_shadow((void *)mod_shadow_end,
+                                                   (void *)kimg_shadow_start);
+       }
 
-       if (kimg_shadow_start > mod_shadow_end)
-               kasan_populate_early_shadow((void *)mod_shadow_end,
-                                           (void *)kimg_shadow_start);
 
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);
-- 
2.25.1
