From: Alexander Potapenko <gli...@google.com>

Inserts KFENCE hooks into the SLUB allocator.

To pass the originally requested allocation size to KFENCE, add an
'orig_size' argument to the slab_alloc*() functions. When KFENCE is
disabled, this adds no overhead: these functions are __always_inline,
so the unused argument is optimized away.
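
As a rough illustration of the hook placement (a minimal userspace
sketch only; toy_kfence_alloc(), toy_slab_alloc() and the sampling
interval below are hypothetical names and values, not the kernel
implementation): the allocation fast path first offers each request to
the guarded allocator and only falls back to the regular freelist path
when the hook declines, with the originally requested size threaded
through the extra argument.

  #include <stdio.h>
  #include <stdlib.h>

  #define TOY_KFENCE_ENABLED 1 /* models CONFIG_KFENCE=y */

  /* Models kfence_alloc(): occasionally claims an allocation. */
  static inline void *toy_kfence_alloc(size_t orig_size)
  {
  #if TOY_KFENCE_ENABLED
          static int counter;

          /* "Sample" every 4th allocation, for illustration only. */
          if (++counter % 4 == 0)
                  return calloc(1, orig_size); /* stand-in for a guarded object */
  #endif
          /* With the hook disabled this is a constant NULL and folds away. */
          return NULL;
  }

  /* Models slab_alloc_node(): note the extra orig_size argument. */
  static inline void *toy_slab_alloc(size_t cache_object_size, size_t orig_size)
  {
          void *object = toy_kfence_alloc(orig_size);

          /* An object claimed by the hook bypasses the regular freelist. */
          if (object)
                  return object;

          /* Regular fast path (stand-in for the per-CPU freelist). */
          return malloc(cache_object_size);
  }

  int main(void)
  {
          for (int i = 0; i < 8; i++) {
                  /* cache object size 64, originally requested size 48 */
                  void *p = toy_slab_alloc(64, 48);

                  printf("allocation %d -> %p\n", i, p);
                  free(p);
          }
          return 0;
  }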

Co-developed-by: Marco Elver <el...@google.com>
Signed-off-by: Marco Elver <el...@google.com>
Signed-off-by: Alexander Potapenko <gli...@google.com>
---
 mm/slub.c | 72 ++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 19 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d4177aecedf6..5c5a13a7857c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
+#include <linux/kfence.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
@@ -1557,6 +1558,11 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
        void *old_tail = *tail ? *tail : *head;
        int rsize;
 
+       if (is_kfence_address(next)) {
+               slab_free_hook(s, next);
+               return true;
+       }
+
        /* Head and tail of the reconstructed freelist */
        *head = NULL;
        *tail = NULL;
@@ -2660,7 +2666,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-                         unsigned long addr, struct kmem_cache_cpu *c)
+                         unsigned long addr, struct kmem_cache_cpu *c,
+                         size_t orig_size)
 {
        void *freelist;
        struct page *page;
@@ -2763,7 +2770,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  * cpu changes by refetching the per cpu area pointer.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-                         unsigned long addr, struct kmem_cache_cpu *c)
+                         unsigned long addr, struct kmem_cache_cpu *c,
+                         size_t orig_size)
 {
        void *p;
        unsigned long flags;
@@ -2778,7 +2786,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-       p = ___slab_alloc(s, gfpflags, node, addr, c);
+       p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
        local_irq_restore(flags);
        return p;
 }
@@ -2805,7 +2813,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
-               gfp_t gfpflags, int node, unsigned long addr)
+               gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
        void *object;
        struct kmem_cache_cpu *c;
@@ -2816,6 +2824,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
        s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
        if (!s)
                return NULL;
+
+       object = kfence_alloc(s, orig_size, gfpflags);
+       if (unlikely(object))
+               goto out;
+
 redo:
        /*
         * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -2853,7 +2866,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
        object = c->freelist;
        page = c->page;
        if (unlikely(!object || !node_match(page, node))) {
-               object = __slab_alloc(s, gfpflags, node, addr, c);
+               object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
                stat(s, ALLOC_SLOWPATH);
        } else {
                void *next_object = get_freepointer_safe(s, object);
@@ -2889,20 +2902,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
        if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                memset(object, 0, s->object_size);
 
+out:
        slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
        return object;
 }
 
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-               gfp_t gfpflags, unsigned long addr)
+               gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
-       return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+       return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-       void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
 
        trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
                                s->size, gfpflags);
@@ -2914,7 +2928,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-       void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+       void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
        trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
        ret = kasan_kmalloc(s, ret, size, gfpflags);
        return ret;
@@ -2925,7 +2939,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-       void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+       void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
 
        trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                    s->object_size, s->size, gfpflags, node);
@@ -2939,7 +2953,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                                    gfp_t gfpflags,
                                    int node, size_t size)
 {
-       void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+       void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
 
        trace_kmalloc_node(_RET_IP_, ret,
                           size, s->size, gfpflags, node);
@@ -2973,6 +2987,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
        stat(s, FREE_SLOWPATH);
 
+       if (kfence_free(head))
+               return;
+
        if (kmem_cache_debug(s) &&
            !free_debug_processing(s, page, head, tail, cnt, addr))
                return;
@@ -3216,6 +3233,13 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
                df->s = cache_from_obj(s, object); /* Support for memcg */
        }
 
+       if (is_kfence_address(object)) {
+               slab_free_hook(df->s, object);
+               WARN_ON(!kfence_free(object));
+               p[size] = NULL; /* mark object processed */
+               return size;
+       }
+
        /* Start new detached freelist */
        df->page = page;
        set_freepointer(df->s, object, NULL);
@@ -3290,8 +3314,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
        c = this_cpu_ptr(s->cpu_slab);
 
        for (i = 0; i < size; i++) {
-               void *object = c->freelist;
+               void *object = kfence_alloc(s, s->object_size, flags);
 
+               if (unlikely(object)) {
+                       p[i] = object;
+                       continue;
+               }
+
+               object = c->freelist;
                if (unlikely(!object)) {
                        /*
                         * We may have removed an object from c->freelist using
@@ -3307,7 +3337,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                         * of re-populating per CPU c->freelist
                         */
                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-                                           _RET_IP_, c);
+                                           _RET_IP_, c, s->object_size);
                        if (unlikely(!p[i]))
                                goto error;
 
@@ -3962,7 +3992,7 @@ void *__kmalloc(size_t size, gfp_t flags)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       ret = slab_alloc(s, flags, _RET_IP_);
+       ret = slab_alloc(s, flags, _RET_IP_, size);
 
        trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -4010,7 +4040,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       ret = slab_alloc_node(s, flags, node, _RET_IP_);
+       ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
 
        trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -4036,6 +4066,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
        struct kmem_cache *s;
        unsigned int offset;
        size_t object_size;
+       bool is_kfence = is_kfence_address(ptr);
 
        ptr = kasan_reset_tag(ptr);
 
@@ -4048,10 +4079,13 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
                               to_user, 0, n);
 
        /* Find offset within object. */
-       offset = (ptr - page_address(page)) % s->size;
+       if (is_kfence)
+               offset = ptr - kfence_object_start(ptr);
+       else
+               offset = (ptr - page_address(page)) % s->size;
 
        /* Adjust for redzone and reject if within the redzone. */
-       if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
+       if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
                if (offset < s->red_left_pad)
                        usercopy_abort("SLUB object in left red zone",
                                       s->name, to_user, offset, n);
@@ -4460,7 +4494,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       ret = slab_alloc(s, gfpflags, caller);
+       ret = slab_alloc(s, gfpflags, caller, size);
 
        /* Honor the call site pointer we received. */
        trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4491,7 +4525,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;
 
-       ret = slab_alloc_node(s, gfpflags, node, caller);
+       ret = slab_alloc_node(s, gfpflags, node, caller, size);
 
        /* Honor the call site pointer we received. */
        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
-- 
2.28.0.526.ge36021eeef-goog
