On Fri, Nov 28, 2025 at 08:37:40PM +0900, Harry Yoo wrote:
> Currently, kvfree_rcu_barrier() flushes RCU sheaves across all slab
> caches when a cache is destroyed. This is unnecessary when destroying
> a slab cache; only the RCU sheaves belonging to the cache being destroyed
> need to be flushed.
>
> As suggested by Vlastimil Babka, introduce a weaker form of
> kvfree_rcu_barrier() that operates on a specific slab cache and call it
> on cache destruction.
>
> The performance benefit is evaluated on a 12 core 24 threads AMD Ryzen
> 5900X machine (1 socket), by loading slub_kunit module.
>
> Before:
> Total calls: 19
> Average latency (us): 8529
> Total time (us): 162069
>
> After:
> Total calls: 19
> Average latency (us): 3804
> Total time (us): 72287

Ooh, I just realized that I messed up the config and have only two
cores enabled. Will update the numbers after enabling 22 more :)
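By the way, to make the intended usage clear: slab users never call
kvfree_rcu_barrier_on_cache() themselves, it is only invoked internally
from kmem_cache_destroy(). A minimal sketch of the kind of module that
benefits (struct foo, foo_cache and the foo_* functions below are made-up
names for illustration only, not part of this patch):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* made-up example object that is freed via kfree_rcu() */
struct foo {
        struct rcu_head rcu;
        long payload;
};

static struct kmem_cache *foo_cache;
static struct foo *foo_obj;

static int __init foo_init(void)
{
        foo_cache = KMEM_CACHE(foo, 0);
        if (!foo_cache)
                return -ENOMEM;

        foo_obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (!foo_obj) {
                kmem_cache_destroy(foo_cache);
                return -ENOMEM;
        }
        return 0;
}

static void __exit foo_exit(void)
{
        /* the object may sit on foo_cache's per-CPU rcu_free sheaf */
        kfree_rcu(foo_obj, rcu);

        /*
         * kmem_cache_destroy() now waits only for in-flight kfree_rcu()
         * objects belonging to foo_cache, via kvfree_rcu_barrier_on_cache(),
         * instead of flushing the RCU sheaves of every cache in the system.
         */
        kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");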
> Link:
> https://lore.kernel.org/linux-mm/[email protected]
> Link:
> https://lore.kernel.org/linux-mm/[email protected]
> Link:
> https://lore.kernel.org/linux-mm/[email protected]
> Suggested-by: Vlastimil Babka <[email protected]>
> Signed-off-by: Harry Yoo <[email protected]>
> ---
>
> Not sure if the regression is worse on the reporters' machines due to
> higher core count (or because some cores were busy doing other things,
> dunno).
>
> Hopefully this will reduce the time to complete tests,
> and Suren could add his patch on top of this ;)
>
>  include/linux/slab.h |  5 ++++
>  mm/slab.h            |  1 +
>  mm/slab_common.c     | 52 +++++++++++++++++++++++++++++------------
>  mm/slub.c            | 55 ++++++++++++++++++++++++--------------------
>  4 files changed, 73 insertions(+), 40 deletions(-)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index cf443f064a66..937c93d44e8c 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -1149,6 +1149,10 @@ static inline void kvfree_rcu_barrier(void)
>  {
>          rcu_barrier();
>  }
> +static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
> +{
> +        rcu_barrier();
> +}
>
>  static inline void kfree_rcu_scheduler_running(void) { }
>  #else
> @@ -1156,6 +1160,7 @@ void kvfree_rcu_barrier(void);
>
>  void kfree_rcu_scheduler_running(void);
>  #endif
> +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
>
>  /**
>   * kmalloc_size_roundup - Report allocation bucket size for the given size
> diff --git a/mm/slab.h b/mm/slab.h
> index f730e012553c..e767aa7e91b0 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
>
>  bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
>  void flush_all_rcu_sheaves(void);
> +void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
>
>  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
>                           SLAB_CACHE_DMA32 | SLAB_PANIC | \
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 84dfff4f7b1f..dd8a49d6f9cc 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
>                  return;
>
>          /* in-flight kfree_rcu()'s may include objects from our cache */
> -        kvfree_rcu_barrier();
> +        kvfree_rcu_barrier_on_cache(s);
>
>          if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
>              (s->flags & SLAB_TYPESAFE_BY_RCU)) {
> @@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
>  }
>  EXPORT_SYMBOL_GPL(kvfree_call_rcu);
>
> -/**
> - * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
> - *
> - * Note that a single argument of kvfree_rcu() call has a slow path that
> - * triggers synchronize_rcu() following by freeing a pointer. It is done
> - * before the return from the function. Therefore for any single-argument
> - * call that will result in a kfree() to a cache that is to be destroyed
> - * during module exit, it is developer's responsibility to ensure that all
> - * such calls have returned before the call to kmem_cache_destroy().
> - */
> -void kvfree_rcu_barrier(void)
> +static inline void __kvfree_rcu_barrier(void)
>  {
>          struct kfree_rcu_cpu_work *krwp;
>          struct kfree_rcu_cpu *krcp;
>          bool queued;
>          int i, cpu;
>
> -        flush_all_rcu_sheaves();
> -
>          /*
>           * Firstly we detach objects and queue them over an RCU-batch
>           * for all CPUs. Finally queued works are flushed for each CPU.
> @@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
>                  }
>          }
>  }
> +
> +/**
> + * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
> + *
> + * Note that a single argument of kvfree_rcu() call has a slow path that
> + * triggers synchronize_rcu() following by freeing a pointer. It is done
> + * before the return from the function. Therefore for any single-argument
> + * call that will result in a kfree() to a cache that is to be destroyed
> + * during module exit, it is developer's responsibility to ensure that all
> + * such calls have returned before the call to kmem_cache_destroy().
> + */
> +void kvfree_rcu_barrier(void)
> +{
> +        flush_all_rcu_sheaves();
> +        __kvfree_rcu_barrier();
> +}
>  EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
>
> +/**
> + * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
> + * specific slab cache.
> + * @s: slab cache to wait for
> + *
> + * See the description of kvfree_rcu_barrier() for details.
> + */
> +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
> +{
> +        if (s->cpu_sheaves)
> +                flush_rcu_sheaves_on_cache(s);
> +        /*
> +         * TODO: Introduce a version of __kvfree_rcu_barrier() that works
> +         * on a specific slab cache.
> +         */
> +        __kvfree_rcu_barrier();
> +}
> +EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
> +
>  static unsigned long
>  kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
>  {
> @@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
>  }
>
>  #endif /* CONFIG_KVFREE_RCU_BATCHED */
> -
> diff --git a/mm/slub.c b/mm/slub.c
> index 785e25a14999..7cec2220712b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
>
>
>  /* needed for kvfree_rcu_barrier() */
> -void flush_all_rcu_sheaves(void)
> +void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
>  {
>          struct slub_flush_work *sfw;
> -        struct kmem_cache *s;
>          unsigned int cpu;
>
> -        cpus_read_lock();
> -        mutex_lock(&slab_mutex);
> +        mutex_lock(&flush_lock);
>
> -        list_for_each_entry(s, &slab_caches, list) {
> -                if (!s->cpu_sheaves)
> -                        continue;
> +        for_each_online_cpu(cpu) {
> +                sfw = &per_cpu(slub_flush, cpu);
>
> -                mutex_lock(&flush_lock);
> +                /*
> +                 * we don't check if rcu_free sheaf exists - racing
> +                 * __kfree_rcu_sheaf() might have just removed it.
> +                 * by executing flush_rcu_sheaf() on the cpu we make
> +                 * sure the __kfree_rcu_sheaf() finished its call_rcu()
> +                 */
>
> -                for_each_online_cpu(cpu) {
> -                        sfw = &per_cpu(slub_flush, cpu);
> +                INIT_WORK(&sfw->work, flush_rcu_sheaf);
> +                sfw->s = s;
> +                queue_work_on(cpu, flushwq, &sfw->work);
> +        }
>
> -                        /*
> -                         * we don't check if rcu_free sheaf exists - racing
> -                         * __kfree_rcu_sheaf() might have just removed it.
> -                         * by executing flush_rcu_sheaf() on the cpu we make
> -                         * sure the __kfree_rcu_sheaf() finished its call_rcu()
> -                         */
> +        for_each_online_cpu(cpu) {
> +                sfw = &per_cpu(slub_flush, cpu);
> +                flush_work(&sfw->work);
> +        }
>
> -                        INIT_WORK(&sfw->work, flush_rcu_sheaf);
> -                        sfw->s = s;
> -                        queue_work_on(cpu, flushwq, &sfw->work);
> -                }
> +        mutex_unlock(&flush_lock);
> +}
>
> -                for_each_online_cpu(cpu) {
> -                        sfw = &per_cpu(slub_flush, cpu);
> -                        flush_work(&sfw->work);
> -                }
> +void flush_all_rcu_sheaves(void)
> +{
> +        struct kmem_cache *s;
> +
> +        cpus_read_lock();
> +        mutex_lock(&slab_mutex);
>
> -                mutex_unlock(&flush_lock);
> +        list_for_each_entry(s, &slab_caches, list) {
> +                if (!s->cpu_sheaves)
> +                        continue;
> +                flush_rcu_sheaves_on_cache(s);
>          }
>
>          mutex_unlock(&slab_mutex);
> --
> 2.43.0
>

--
Cheers,
Harry / Hyeonggon

