Add support for dynamically attaching an rcu_head to an object
that is freed via the single-argument form of kvfree_rcu(). This
path is taken when a page allocation fails due to high memory
pressure.

The basic idea is to minimize hitting the slow path, which
requires the caller to wait until a grace period has passed.
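
For example (an illustrative sketch; "struct foo" is hypothetical,
and the single-argument macro is spelled kvfree_rcu_mightsleep() in
some trees), the path in question is taken by a headless caller
such as:

	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	...
	kvfree_rcu(fp);	/* single argument: no embedded rcu_head */

when no bulk-array page can be obtained under memory pressure.
Before this change such a caller fell back to an inline
synchronize_rcu() followed by kvfree().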

Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
---
 kernel/rcu/tree.c | 53 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 49 insertions(+), 4 deletions(-)
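
Note: the decode added to kvfree_rcu_list() relies on a small tagging
trick. For an rcu_head embedded in the object, ->func holds the
pointer to be freed and "head - ptr" yields a small offset; for a
dynamically attached head, ->func itself holds the small value
offsetof(struct dyn_rcu_head, rh). Either way the interesting value is
below 4096, which is exactly what __is_kvfree_rcu_offset() tests, so
one magnitude check tells the two encodings apart. Also note that
kvfree_call_rcu() drops the krcp lock around the GFP_KERNEL allocation
because the headless variant may only be used from might_sleep()
context.

A minimal userspace model of the tagging scheme (stand-in names and
layout, not kernel APIs; compiles with any C99 compiler):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct rh { uintptr_t func; };			/* models rcu_head */
	struct dyn { void *ptr; struct rh rh; };	/* models dyn_rcu_head */

	/* Models __is_kvfree_rcu_offset(): struct offsets are below 4096. */
	static int is_offset(uintptr_t v) { return v < 4096; }

	int main(void)
	{
		void *obj = malloc(128);
		struct dyn *d = malloc(sizeof(*d));
		struct rh *head;
		void *ptr;

		if (!obj || !d)
			return 1;

		/* Encode, as kvfree_call_rcu() does for a headless object:
		   ->func carries an offset, not a pointer. */
		d->ptr = obj;
		d->rh.func = offsetof(struct dyn, rh);

		/* Decode, as kvfree_rcu_list() does. */
		head = &d->rh;
		ptr = (void *) head->func;
		if (is_offset((uintptr_t) ptr)) {
			/* Open-coded container_of(): back up to the wrapper. */
			struct dyn *w = (struct dyn *)
				((char *) head - offsetof(struct dyn, rh));

			ptr = w->ptr;
			free(w);	/* the wrapper is freed separately */
		}

		free(ptr);		/* frees the original object */
		return 0;
	}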

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index be00aac5f4e7..0124411fecfb 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3425,6 +3425,11 @@ kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
        cond_resched_tasks_rcu_qs();
 }
 
+struct dyn_rcu_head {
+       unsigned long *ptr;
+       struct rcu_head rh;
+};
+
 static void
 kvfree_rcu_list(struct rcu_head *head)
 {
@@ -3433,15 +3438,32 @@ kvfree_rcu_list(struct rcu_head *head)
        for (; head; head = next) {
                void *ptr = (void *) head->func;
                unsigned long offset = (void *) head - ptr;
+               struct dyn_rcu_head *drhp = NULL;
+
+               /*
+                * For a dynamically attached rcu_head, the ->func field
+                * holds the _offset_ rather than the pointer to the
+                * object being freed. For such objects, recover the
+                * real pointer and offset from the dyn_rcu_head wrapper.
+                */
+               if (__is_kvfree_rcu_offset((unsigned long) ptr)) {
+                       drhp = container_of(head, struct dyn_rcu_head, rh);
+                       offset = (unsigned long) drhp->rh.func;
+                       ptr = drhp->ptr;
+               }
 
                next = head->next;
                debug_rcu_head_unqueue((struct rcu_head *)ptr);
                rcu_lock_acquire(&rcu_callback_map);
                trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
 
-               if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
+               if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) {
                        kvfree(ptr);
 
+                       if (drhp)
+                               kvfree(drhp);
+               }
+
                rcu_lock_release(&rcu_callback_map);
                cond_resched_tasks_rcu_qs();
        }
@@ -3787,6 +3809,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
        return true;
 }
 
+static struct rcu_head *
+attach_rcu_head_to_object(void *obj)
+{
+       struct dyn_rcu_head *rhp;
+
+       rhp = kmalloc(sizeof(*rhp), GFP_KERNEL |
+               __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+       if (!rhp)
+               return NULL;
+
+       rhp->ptr = obj;
+       return &rhp->rh;
+}
+
 /*
  * Queue a request for lazy invocation of the appropriate free routine
  * after a grace period.  Please note that three paths are maintained,
@@ -3830,9 +3867,17 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
        if (!success) {
                run_page_cache_worker(krcp);
 
-               if (head == NULL)
-                       // Inline if kvfree_rcu(one_arg) call.
-                       goto unlock_return;
+               if (!head) {
+                       krc_this_cpu_unlock(krcp, flags);
+                       head = attach_rcu_head_to_object(ptr);
+                       krcp = krc_this_cpu_lock(&flags);
+
+                       if (!head)
+                               // Inline if kvfree_rcu(one_arg) call.
+                               goto unlock_return;
+
+                       ptr = (void *) offsetof(struct dyn_rcu_head, rh);
+               }
 
                head->func = ptr;
                head->next = krcp->head;
-- 
2.39.2

