On 9/9/25 12:20, Vlastimil Babka wrote:
> On 9/9/25 11:08, Uladzislau Rezki wrote:
>> On Mon, Sep 08, 2025 at 02:45:11PM +0200, Vlastimil Babka wrote:
>>> 
>>> Hm, I could not find where that distinction is in the code, can you give a
>>> hint please? In __kfree_rcu_sheaf() I only have a GFP_NOWAIT attempt.
>>> 
>> For PREEMPT_RT a regular spin-lock is an rt-mutex, which can sleep. We
>> made kvfree_rcu() callable from non-sleepable contexts:
> 
> Oh, you mean it doesn't allocate even on !RT, so there's no RT-specific code.
> 
>> CONFIG_PREEMPT_RT
>> 
>> preempt_disable() or something similar;
>>  kvfree_rcu();
>>   GFP_NOWAIT - lock rt-mutex
>> 
>> If the GFP_NOWAIT path does not take any spin-locks, or only takes
>> raw_spin_locks, then we are safe.
> 
> It does access spinlocks so it's not safe. Thanks, I didn't realize that
> aspect of kfree_rcu(). We'll need to solve this before enabling sheaves
> everywhere. I don't think the vma or maple tree code would kfree_rcu() a
> vma or a maple_node in such a restricted context, but to be safe I'll
> just disable the kfree_rcu() sheaves for PREEMPT_RT for now.
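
To make the constraint concrete, the usage that has to keep working is
roughly this (struct foo and foo_free() are made-up names; the
two-argument kvfree_rcu() form is the one callers may use from atomic
context):

  struct foo {
          struct rcu_head rcu;
          /* ... */
  };

  /* hypothetical caller, valid today even from atomic context */
  static void foo_free(struct foo *foo_ptr)
  {
          preempt_disable();              /* or under a raw spinlock, hardirq, ... */
          kvfree_rcu(foo_ptr, rcu);       /* the two-argument form must not sleep  */
          preempt_enable();
  }

On RT, both the barn spin_lock taken in __kfree_rcu_sheaf() and a
GFP_NOWAIT allocation of a new sheaf end up acquiring sleeping locks,
which is not allowed in such a context.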

So I came up with this fixup to avoid PREEMPT_RT troubles and make
kvfree_rcu_barrier() work.
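
For the kvfree_rcu_barrier() part, the promise to keep is the global
one, roughly (obj_a and obj_b are hypothetical objects from two
different caches with sheaves enabled):

  kvfree_rcu(obj_a, rcu);   /* may sit in cache A's percpu rcu_free sheaf */
  kvfree_rcu(obj_b, rcu);   /* may sit in cache B's percpu rcu_free sheaf */

  kvfree_rcu_barrier();     /* must not return before both are freed */

so flushing the sheaves of just one cache being destroyed is not enough;
flush_all_rcu_sheaves() below walks all caches that have sheaves.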

----8<----
From 15a8db2ef716b5db547f2d86ab30d8774333fb04 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka <[email protected]>
Date: Tue, 9 Sep 2025 16:18:52 +0200
Subject: [PATCH] slub: fix issues with kfree_rcu sheaf handling

Fix two issues reported by Ulad:

- on PREEMPT_RT, if kfree_rcu() comes from an atomic context, taking a
  spinlock on the barn or doing a GFP_NOWAIT allocation of a new sheaf
  might not be possible. For now, just limit the usage of
  kfree_rcu_sheaf() to !PREEMPT_RT.

- kvfree_rcu_barrier() must flush all rcu_free sheaves to deliver on its
  promise; its use isn't limited to destroying a single cache. Add
  flush_all_rcu_sheaves() to do that.

Reported-by: Uladzislau Rezki <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
---
 mm/slab.h        |  1 +
 mm/slab_common.c |  4 ++-
 mm/slub.c        | 74 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/mm/slab.h b/mm/slab.h
index f1866f2d9b21..e82e51c44bd0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -436,6 +436,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 }
 
 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
+void flush_all_rcu_sheaves(void);
 
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2d806e025685..005a4319c06a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1973,7 +1973,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
        if (!head)
                might_sleep();
 
-       if (kfree_rcu_sheaf(ptr))
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT) && kfree_rcu_sheaf(ptr))
                return;
 
        // Queue the object but don't yet schedule the batch.
@@ -2050,6 +2050,8 @@ void kvfree_rcu_barrier(void)
        bool queued;
        int i, cpu;
 
+       flush_all_rcu_sheaves();
+
        /*
         * Firstly we detach objects and queue them over an RCU-batch
         * for all CPUs. Finally queued works are flushed for each CPU.
diff --git a/mm/slub.c b/mm/slub.c
index 9f9b7e1fa356..19cd8444ae5d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3895,6 +3895,80 @@ static void flush_all(struct kmem_cache *s)
        cpus_read_unlock();
 }
 
+static void flush_rcu_sheaf(struct work_struct *w)
+{
+       struct slub_percpu_sheaves *pcs;
+       struct slab_sheaf *rcu_free;
+       struct slub_flush_work *sfw;
+       struct kmem_cache *s;
+
+       sfw = container_of(w, struct slub_flush_work, work);
+       s = sfw->s;
+
+       local_lock(&s->cpu_sheaves->lock);
+       pcs = this_cpu_ptr(s->cpu_sheaves);
+
+       rcu_free = pcs->rcu_free;
+       pcs->rcu_free = NULL;
+
+       local_unlock(&s->cpu_sheaves->lock);
+
+       if (rcu_free)
+               call_rcu(&rcu_free->rcu_head, rcu_free_sheaf_nobarn);
+}
+
+
+/* needed for kvfree_rcu_barrier() */
+void flush_all_rcu_sheaves(void)
+{
+       struct slub_percpu_sheaves *pcs;
+       struct slub_flush_work *sfw;
+       struct kmem_cache *s;
+       bool flushed = false;
+       unsigned int cpu;
+
+       cpus_read_lock();
+       mutex_lock(&slab_mutex);
+
+       list_for_each_entry(s, &slab_caches, list) {
+               if (!s->cpu_sheaves)
+                       continue;
+
+               mutex_lock(&flush_lock);
+
+               for_each_online_cpu(cpu) {
+                       sfw = &per_cpu(slub_flush, cpu);
+                       pcs = per_cpu_ptr(s->cpu_sheaves, cpu);
+
+                       if (!pcs->rcu_free || !pcs->rcu_free->size) {
+                               sfw->skip = true;
+                               continue;
+                       }
+
+                       INIT_WORK(&sfw->work, flush_rcu_sheaf);
+                       sfw->skip = false;
+                       sfw->s = s;
+                       queue_work_on(cpu, flushwq, &sfw->work);
+                       flushed = true;
+               }
+
+               for_each_online_cpu(cpu) {
+                       sfw = &per_cpu(slub_flush, cpu);
+                       if (sfw->skip)
+                               continue;
+                       flush_work(&sfw->work);
+               }
+
+               mutex_unlock(&flush_lock);
+       }
+
+       mutex_unlock(&slab_mutex);
+       cpus_read_unlock();
+
+       if (flushed)
+               rcu_barrier();
+}
+
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
-- 
2.51.0


