Unify checks in kasan_kfree_large() and in kasan_slab_free_mempool()
for large allocations, as is already done for small kfree() allocations.

With this change, kasan_slab_free_mempool() starts checking that the
first byte of the memory that's being freed is accessible.

Reviewed-by: Marco Elver <el...@google.com>
Signed-off-by: Andrey Konovalov <andreyk...@google.com>
---
 include/linux/kasan.h | 16 ++++++++--------
 mm/kasan/common.c     | 36 ++++++++++++++++++++++++++----------
 2 files changed, 34 insertions(+), 18 deletions(-)
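
Not part of the patch: a minimal sketch of how the unified check can be
exercised, in the style of the page_alloc invalid-free tests in
lib/test_kasan.c. The test name and the allocation size below are made up.
The first kfree() leads to the backing pages being poisoned by
kasan_free_pages(), so the first byte is no longer accessible and the second
kfree() is expected to be reported by the unified ____kasan_kfree_large()
check; the same check now also covers page_alloc-backed memory freed through
kasan_slab_free_mempool().

#include <linux/slab.h>	/* kmalloc(), kfree(), KMALLOC_MAX_CACHE_SIZE */

static void kmalloc_pagealloc_double_free_sketch(void)
{
	/* Large enough that kmalloc() falls back onto page_alloc. */
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
	char *ptr;

	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr)
		return;

	kfree(ptr);
	/* KASAN is expected to report an invalid free here. */
	kfree(ptr);
}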

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 2d5de4092185..d53ea3c047bc 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -200,6 +200,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
        return false;
 }
 
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+       if (kasan_enabled())
+               __kasan_kfree_large(ptr, _RET_IP_);
+}
+
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
 static __always_inline void kasan_slab_free_mempool(void *ptr)
 {
@@ -247,13 +254,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
        return (void *)object;
 }
 
-void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr)
-{
-       if (kasan_enabled())
-               __kasan_kfree_large(ptr, _RET_IP_);
-}
-
 /*
  * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
  * the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -302,6 +302,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
+static inline void kasan_kfree_large(void *ptr) {}
 static inline void kasan_slab_free_mempool(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags)
@@ -322,7 +323,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
        return (void *)object;
 }
-static inline void kasan_kfree_large(void *ptr) {}
 static inline bool kasan_check_byte(const void *address)
 {
        return true;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index da24b144d46c..7ea643f7e69c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -364,6 +364,31 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
        return ____kasan_slab_free(cache, object, ip, true);
 }
 
+static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+{
+       if (ptr != page_address(virt_to_head_page(ptr))) {
+               kasan_report_invalid_free(ptr, ip);
+               return true;
+       }
+
+       if (!kasan_byte_accessible(ptr)) {
+               kasan_report_invalid_free(ptr, ip);
+               return true;
+       }
+
+       /*
+        * The object will be poisoned by kasan_free_pages() or
+        * kasan_slab_free_mempool().
+        */
+
+       return false;
+}
+
+void __kasan_kfree_large(void *ptr, unsigned long ip)
+{
+       ____kasan_kfree_large(ptr, ip);
+}
+
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
        struct page *page;
@@ -377,10 +402,8 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
         * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
         */
        if (unlikely(!PageSlab(page))) {
-               if (ptr != page_address(page)) {
-                       kasan_report_invalid_free(ptr, ip);
+               if (____kasan_kfree_large(ptr, ip))
                        return;
-               }
                kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                ____kasan_slab_free(page->slab_cache, ptr, ip, false);
@@ -539,13 +562,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
                return ____kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void __kasan_kfree_large(void *ptr, unsigned long ip)
-{
-       if (ptr != page_address(virt_to_head_page(ptr)))
-               kasan_report_invalid_free(ptr, ip);
-       /* The object will be poisoned by kasan_free_pages(). */
-}
-
 bool __kasan_check_byte(const void *address, unsigned long ip)
 {
        if (!kasan_byte_accessible(address)) {
-- 
2.30.0.365.g02bc693789-goog
