The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     952371d6fc0bc360d1d5780f86bb355836117ca2
Gitweb:        https://git.kernel.org/tip/952371d6fc0bc360d1d5780f86bb355836117ca2
Author:        Uladzislau Rezki (Sony) <[email protected]>
AuthorDate:    Mon, 25 May 2020 23:47:50 +02:00
Committer:     Paul E. McKenney <[email protected]>
CommitterDate: Mon, 29 Jun 2020 11:59:25 -07:00

rcu/tree: Move kfree_rcu_cpu locking/unlocking to separate functions

Introduce helpers to lock and unlock per-cpu "kfree_rcu_cpu"
structures. That will make kfree_call_rcu() more readable
and prevent programming errors.

Reviewed-by: Joel Fernandes (Google) <[email protected]>
Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
---
 kernel/rcu/tree.c | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bcdc063..368bdc4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3035,6 +3035,27 @@ debug_rcu_bhead_unqueue(struct kfree_rcu_bulk_data *bhead)
 #endif
 }
 
+static inline struct kfree_rcu_cpu *
+krc_this_cpu_lock(unsigned long *flags)
+{
+       struct kfree_rcu_cpu *krcp;
+
+       local_irq_save(*flags); // For safely calling this_cpu_ptr().
+       krcp = this_cpu_ptr(&krc);
+       if (likely(krcp->initialized))
+               raw_spin_lock(&krcp->lock);
+
+       return krcp;
+}
+
+static inline void
+krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
+{
+       if (likely(krcp->initialized))
+               raw_spin_unlock(&krcp->lock);
+       local_irq_restore(flags);
+}
+
 /*
  * This function is invoked in workqueue context after a grace period.
  * It frees all the objects queued on ->bhead_free or ->head_free.
@@ -3260,11 +3281,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
        struct kfree_rcu_cpu *krcp;
        void *ptr;
 
-       local_irq_save(flags);  // For safely calling this_cpu_ptr().
-       krcp = this_cpu_ptr(&krc);
-       if (krcp->initialized)
-               raw_spin_lock(&krcp->lock);
-
+       krcp = krc_this_cpu_lock(&flags);
        ptr = (void *)head - (unsigned long)func;
 
        // Queue the object but don't yet schedule the batch.
@@ -3295,9 +3312,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
        }
 
 unlock_return:
-       if (krcp->initialized)
-               raw_spin_unlock(&krcp->lock);
-       local_irq_restore(flags);
+       krc_this_cpu_unlock(krcp, flags);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 

Reply via email to