Allow architectures to define a kasan_arch_is_ready() hook that bails
out of any function that is about to touch the shadow unless the arch
says that the shadow memory is ready to be accessed. This is fairly
non-invasive and should have a negligible performance penalty.

This only works in outline mode, so an arch that requires this check
must also select HAVE_ARCH_NO_KASAN_INLINE.
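
An arch opting in would provide something like the following in its
asm/kasan.h (a sketch only: the kasan_arch_ready flag and where the
arch sets it are assumptions, not part of this patch; the #define is
what makes the #ifndef fallback in include/linux/kasan.h stand aside):

  /* hypothetical arch/xxx/include/asm/kasan.h */
  extern bool kasan_arch_ready;	/* set once the shadow is mapped */

  static inline bool kasan_arch_is_ready(void)
  {
	return kasan_arch_ready;
  }
  #define kasan_arch_is_ready kasan_arch_is_ready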

Cc: Balbir Singh <bsinghar...@gmail.com>
Cc: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
Signed-off-by: Daniel Axtens <d...@axtens.net>

--

I discuss the justification for this later in the series. Also,
both previous RFCs for ppc64, by two different people, have needed
this trick! See the links below; a sketch of the arch-side flag
flip follows them.
 - https://lore.kernel.org/patchwork/patch/592820/ # ppc64 hash series
 - https://patchwork.ozlabs.org/patch/795211/      # ppc radix series
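
To make the ordering concrete, a hypothetical arch would flip its
readiness flag only once the real shadow has been mapped, along these
lines (a sketch with illustrative names, not code from either series):

  /* hypothetical arch/xxx/mm/kasan_init.c */
  bool kasan_arch_ready __read_mostly;

  void __init kasan_init(void)
  {
	/* ... map and zero the real shadow pages ... */
	kasan_arch_ready = true;	/* KASAN checks are live from here */
  }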
---
 include/linux/kasan.h |  4 ++++
 mm/kasan/common.c     | 10 ++++++++++
 mm/kasan/generic.c    |  3 +++
 3 files changed, 17 insertions(+)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 30d343b4a40a..3df66fdf6662 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -20,6 +20,10 @@ struct kunit_kasan_expectation {
        bool report_found;
 };
 
+#ifndef kasan_arch_is_ready
+static inline bool kasan_arch_is_ready(void)   { return true; }
+#endif
+
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
 extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 950fd372a07e..ba7744d3e319 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -117,6 +117,9 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
        void *shadow_start, *shadow_end;
 
+       if (!kasan_arch_is_ready())
+               return;
+
        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
@@ -134,6 +137,9 @@ void kasan_unpoison_shadow(const void *address, size_t size)
 {
        u8 tag = get_tag(address);
 
+       if (!kasan_arch_is_ready())
+               return;
+
        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
@@ -406,6 +412,10 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;
 
+       /* We can't read the shadow byte if the arch isn't ready */
+       if (!kasan_arch_is_ready())
+               return false;
+
        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 248264b9cb76..e87404026b2b 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -169,6 +169,9 @@ static __always_inline bool check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
 {
+       if (!kasan_arch_is_ready())
+               return true;
+
        if (unlikely(size == 0))
                return true;
 
-- 
2.25.1
