In the single-argument case, use the polled grace-period API to
check whether a GP has already elapsed. This makes it possible
to bypass an extra GP request on the slow path.

Allocating a page or a dynamic rcu_head can take some time and
still fail. In that scenario the GP requested on entry to
kvfree_call_rcu() may already have elapsed, so benefit from this
instead of unconditionally blocking in synchronize_rcu().
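
For reference, a minimal sketch of the polled pattern this change
relies on (illustration only, not part of the patch; the
do_possibly_slow_work() helper is hypothetical):

	struct rcu_gp_oldstate snap;

	/* Snapshot the GP state and request a new GP if needed. */
	start_poll_synchronize_rcu_full(&snap);

	/* Work that may run long enough for the GP to elapse,
	 * e.g. a failing page allocation (hypothetical helper). */
	do_possibly_slow_work();

	/*
	 * Block for a full GP only if one has not already elapsed
	 * since the snapshot; otherwise return immediately.
	 */
	cond_synchronize_rcu_full(&snap);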

Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
---
 kernel/rcu/tree.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e641cc681901..182772494cb0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3789,6 +3789,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
        unsigned long flags;
+       struct rcu_gp_oldstate old_snap;
        struct kfree_rcu_cpu *krcp;
        bool success;
 
@@ -3799,8 +3800,10 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
         * only. For other places please embed an rcu_head to
         * your data.
         */
-       if (!head)
+       if (!head) {
                might_sleep();
+               start_poll_synchronize_rcu_full(&old_snap);
+       }
 
        // Queue the object but don't yet schedule the batch.
        if (debug_rcu_head_queue(ptr)) {
@@ -3853,7 +3856,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
         */
        if (!success) {
                debug_rcu_head_unqueue((struct rcu_head *) ptr);
-               synchronize_rcu();
+               cond_synchronize_rcu_full(&old_snap);
                kvfree(ptr);
        }
 }
-- 
2.39.2

