Slab objects allocated with kmalloc_nolock() must currently be freed
with kfree_nolock(): because kmalloc_nolock() cannot spin on a lock
during allocation, it invokes only a subset of the alloc hooks, and the
free path must invoke only the matching subset of free hooks.
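
For illustration, a minimal sketch of the pre- and post-patch free
paths, assuming the (size, gfp_flags, node) signature of
kmalloc_nolock() from this series; struct foo, its rcu field, and
foo_free_cb() are hypothetical names, not taken from this patch, and
the two free forms below are alternatives, not a sequence:

	struct foo {
		struct rcu_head rcu;
		long payload;
	};

	/* Pre-patch: the RCU callback must end in kfree_nolock(). */
	static void foo_free_cb(struct rcu_head *head)
	{
		kfree_nolock(container_of(head, struct foo, rcu));
	}

	struct foo *f = kmalloc_nolock(sizeof(*f), 0, NUMA_NO_NODE);

	if (!f)
		return;

	/* Pre-patch workaround: */
	call_rcu(&f->rcu, foo_free_cb);

	/* Post-patch equivalent (pick one, not both): */
	kfree_rcu(f, rcu);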
This imposes a limitation: such objects cannot be freed with
kfree_rcu(), forcing users into the call_rcu() workaround sketched
above, with a callback that frees the object via kfree_nolock().

Remove this limitation by teaching kmemleak to gracefully ignore the
case where kmemleak_free() is called without a prior kmemleak_alloc().
Unlike kmemleak, kfence already handles this case: by design, only a
subset of allocations are served from kfence, so its free path already
copes with objects it did not allocate.

With this change, kfree() and kfree_rcu() can be used to free objects
allocated with kmalloc_nolock().

Suggested-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Harry Yoo <[email protected]>
---
 include/linux/rcupdate.h |  4 ++--
 mm/kmemleak.c            | 11 +++++------
 mm/slub.c                | 21 ++++++++++++++++++++-
 3 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8924edf7e8c1..db5053a7b0cb 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1084,8 +1084,8 @@ static inline void rcu_read_unlock_migrate(void)
  * If this error is triggered, you can either fall back to use of call_rcu()
  * or rearrange the structure to position @rf into the first 4096 bytes.
  *
- * The object to be freed can be allocated either by kmalloc() or
- * kmem_cache_alloc().
+ * The object to be freed can be allocated either by kmalloc(),
+ * kmalloc_nolock(), or kmem_cache_alloc().
  *
  * Note that the allowable offset might decrease in the future.
  *
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1ac56ceb29b6..de32db5c4f23 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -837,13 +837,12 @@ static void delete_object_full(unsigned long ptr, unsigned int objflags)
 	struct kmemleak_object *object;
 
 	object = find_and_remove_object(ptr, 0, objflags);
-	if (!object) {
-#ifdef DEBUG
-		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-			      ptr);
-#endif
+	if (!object)
+		/*
+		 * kmalloc_nolock() -> kfree() calls kmemleak_free()
+		 * without kmemleak_alloc()
+		 */
 		return;
-	}
 	__delete_object(object);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 102fb47ae013..a118ac009b61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2581,6 +2581,24 @@ struct rcu_delayed_free {
  * Returns true if freeing of the object can proceed, false if its reuse
  * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
  * to KFENCE.
+ *
+ * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
+ * are invoked, so some free hooks must handle asymmetric hook calls.
+ *
+ * Alloc hooks called for kmalloc_nolock():
+ * - kmsan_slab_alloc()
+ * - kasan_slab_alloc()
+ * - memcg_slab_post_alloc_hook()
+ * - alloc_tagging_slab_alloc_hook()
+ *
+ * Free hooks that must handle missing corresponding alloc hooks:
+ * - kmemleak_free_recursive()
+ * - kfence_free()
+ *
+ * Free hooks that have no alloc hook counterpart, and thus are safe to call:
+ * - debug_check_no_locks_freed()
+ * - debug_check_no_obj_freed()
+ * - __kcsan_check_access()
  */
 static __always_inline
 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
@@ -6365,7 +6383,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
 
 /**
  * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
  *
  * If @object is NULL, no operation is performed.
  */
@@ -6384,6 +6402,7 @@ void kfree(const void *object)
 	page = virt_to_page(object);
 	slab = page_slab(page);
 	if (!slab) {
+		/* kmalloc_nolock() doesn't support large kmalloc */
 		free_large_kmalloc(page, (void *)object);
 		return;
 	}
-- 
2.43.0
