The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     148e3731d124079a036b3acf780f3d35c1b9c0aa
Gitweb:        https://git.kernel.org/tip/148e3731d124079a036b3acf780f3d35c1b9c0aa
Author:        Uladzislau Rezki (Sony) <ure...@gmail.com>
AuthorDate:    Wed, 20 Jan 2021 17:21:46 +01:00
Committer:     Paul E. McKenney <paul...@kernel.org>
CommitterDate: Mon, 08 Mar 2021 14:18:07 -08:00

kvfree_rcu: Directly allocate page for single-argument case

Single-argument kvfree_rcu() must be invoked from sleepable contexts,
so we can directly allocate pages.  Furthermore, the fallback in case
of page-allocation failure is the high-latency synchronize_rcu(), so it
makes sense to do these page allocations from the fastpath, and even to
permit limited sleeping within the allocator.
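
For reference, a minimal sketch of the two calling conventions (the
structure and function names below are illustrative, not part of this
patch):

  #include <linux/rcupdate.h>

  /* Illustrative only: any object freed after a grace period. */
  struct foo {
          struct rcu_head rh;
          int data;
  };

  static void foo_release_examples(struct foo *a, struct foo *b)
  {
          /*
           * Double-argument form: may be called from contexts that
           * cannot sleep, so it cannot invoke the page allocator and
           * instead relies on the pre-filled per-CPU page cache.
           */
          kvfree_rcu(a, rh);

          /*
           * Single-argument form: the caller must be able to sleep,
           * because a failed page allocation falls back to the
           * high-latency synchronize_rcu().
           */
          kvfree_rcu(b);
  }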

This commit therefore allocates a page if needed on the fastpath using
GFP_KERNEL|__GFP_RETRY_MAYFAIL.  This also has the beneficial effect
of leaving kvfree_rcu()'s per-CPU caches to the double-argument variant
of kvfree_rcu(), given that the double-argument variant cannot directly
invoke the allocator.
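
The hunk below implements this directly; as a standalone sketch of the
intent behind the flag combination (the helper name here is
hypothetical):

  #include <linux/gfp.h>

  /*
   * Hypothetical illustration of the allocation policy: GFP_KERNEL
   * permits limited sleeping, __GFP_RETRY_MAYFAIL asks the allocator
   * to try reasonably hard but still fail rather than invoke the OOM
   * killer, and __GFP_NOWARN suppresses the allocation-failure splat
   * because the caller has a fallback path.
   */
  static unsigned long alloc_bulk_page_sketch(void)
  {
          return __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL |
                                 __GFP_NOWARN);
  }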

[ paulmck: Add add_ptr_to_bulk_krc_lock header comment per Michal Hocko. ]
Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
Signed-off-by: Paul E. McKenney <paul...@kernel.org>
---
 kernel/rcu/tree.c | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index da6f521..1f8c980 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3493,37 +3493,50 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
        }
 }
 
+// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
+// state specified by flags.  If can_alloc is true, the caller must
+// be schedulable and not be holding any locks or mutexes that might be
+// acquired by the memory allocator or anything that it might invoke.
+// Returns true if ptr was successfully recorded, else the caller must
+// use a fallback.
 static inline bool
-kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
+add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
+       unsigned long *flags, void *ptr, bool can_alloc)
 {
        struct kvfree_rcu_bulk_data *bnode;
        int idx;
 
-       if (unlikely(!krcp->initialized))
+       *krcp = krc_this_cpu_lock(flags);
+       if (unlikely(!(*krcp)->initialized))
                return false;
 
-       lockdep_assert_held(&krcp->lock);
        idx = !!is_vmalloc_addr(ptr);
 
        /* Check if a new block is required. */
-       if (!krcp->bkvhead[idx] ||
-                       krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
-               bnode = get_cached_bnode(krcp);
-               /* Switch to emergency path. */
+       if (!(*krcp)->bkvhead[idx] ||
+                       (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+               bnode = get_cached_bnode(*krcp);
+               if (!bnode && can_alloc) {
+                       krc_this_cpu_unlock(*krcp, *flags);
+                       bnode = (struct kvfree_rcu_bulk_data *)
+                               __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+                       *krcp = krc_this_cpu_lock(flags);
+               }
+
                if (!bnode)
                        return false;
 
                /* Initialize the new block. */
                bnode->nr_records = 0;
-               bnode->next = krcp->bkvhead[idx];
+               bnode->next = (*krcp)->bkvhead[idx];
 
                /* Attach it to the head. */
-               krcp->bkvhead[idx] = bnode;
+               (*krcp)->bkvhead[idx] = bnode;
        }
 
        /* Finally insert. */
-       krcp->bkvhead[idx]->records
-               [krcp->bkvhead[idx]->nr_records++] = ptr;
+       (*krcp)->bkvhead[idx]->records
+               [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
 
        return true;
 }
@@ -3561,8 +3574,6 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
                ptr = (unsigned long *) func;
        }
 
-       krcp = krc_this_cpu_lock(&flags);
-
        // Queue the object but don't yet schedule the batch.
        if (debug_rcu_head_queue(ptr)) {
                // Probable double kfree_rcu(), just leak.
@@ -3570,12 +3581,11 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
                          __func__, head);
 
                // Mark as success and leave.
-               success = true;
-               goto unlock_return;
+               return;
        }
 
        kasan_record_aux_stack(ptr);
-       success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+       success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
        if (!success) {
                run_page_cache_worker(krcp);
 
