From: Frederic Weisbecker <frede...@kernel.org>

RCU core can no longer be running in the middle of a (de-)offloading
transition, since such transitions now apply only to offline CPUs.

The locked callback acceleration handling during the transition can
therefore be removed, along with the handling of concurrent batch
execution.
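
For illustration only (not part of this patch): a minimal userspace C
sketch modeling the simplified decision, under the assumption stated
above that the offloaded state cannot change while rcu_core() runs.
The names below (fake_rdp, rdp_is_offloaded, fake_rcu_core) are
hypothetical stand-ins, not the kernel's APIs.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct rcu_data; not kernel code. */
	struct fake_rdp {
		bool offloaded;	/* models rcu_rdp_is_offloaded(rdp) */
		int ready_cbs;	/* models rcu_segcblist_ready_cbs() */
	};

	/*
	 * (De-)offloading only happens on offline CPUs, so this state is
	 * stable for the whole duration of a fake_rcu_core() call and can
	 * be read directly instead of being snapshotted under a lock.
	 */
	static bool rdp_is_offloaded(const struct fake_rdp *rdp)
	{
		return rdp->offloaded;
	}

	static void fake_rcu_core(struct fake_rdp *rdp)
	{
		if (!rdp_is_offloaded(rdp) && rdp->ready_cbs > 0) {
			printf("invoking %d callbacks from rcu_core()\n",
			       rdp->ready_cbs);
			rdp->ready_cbs = 0;
		} else {
			printf("leaving callbacks to nocb kthreads (or none ready)\n");
		}
	}

	int main(void)
	{
		struct fake_rdp local = { .offloaded = false, .ready_cbs = 3 };
		struct fake_rdp nocb  = { .offloaded = true,  .ready_cbs = 3 };

		fake_rcu_core(&local);
		fake_rcu_core(&nocb);
		return 0;
	}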

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Signed-off-by: Paul E. McKenney <paul...@kernel.org>
Reviewed-by: Paul E. McKenney <paul...@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadh...@kernel.org>
---
 kernel/rcu/tree.c | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 60f271f5c079..1a272c678533 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2781,24 +2781,6 @@ static __latent_entropy void rcu_core(void)
        unsigned long flags;
        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
-       /*
-        * On RT rcu_core() can be preempted when IRQs aren't disabled.
-        * Therefore this function can race with concurrent NOCB (de-)offloading
-        * on this CPU and the below condition must be considered volatile.
-        * However if we race with:
-        *
-        * _ Offloading:   In the worst case we accelerate or process callbacks
-        *                 concurrently with NOCB kthreads. We are guaranteed to
-        *                 call rcu_nocb_lock() if that happens.
-        *
-        * _ Deoffloading: In the worst case we miss callbacks acceleration or
-        *                 processing. This is fine because the early stage
-        *                 of deoffloading invokes rcu_core() after setting
-        *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
-        *                 what could have been dismissed without the need to wait
-        *                 for the next rcu_pending() check in the next jiffy.
-        */
-       const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
 
        if (cpu_is_offline(smp_processor_id()))
                return;
@@ -2818,17 +2800,17 @@ static __latent_entropy void rcu_core(void)
 
        /* No grace period and unregistered callbacks? */
        if (!rcu_gp_in_progress() &&
-           rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
-               rcu_nocb_lock_irqsave(rdp, flags);
+           rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
+               local_irq_save(flags);
                if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                        rcu_accelerate_cbs_unlocked(rnp, rdp);
-               rcu_nocb_unlock_irqrestore(rdp, flags);
+               local_irq_restore(flags);
        }
 
        rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
        /* If there are callbacks ready, invoke them. */
-       if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+       if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
            likely(READ_ONCE(rcu_scheduler_fully_active))) {
                rcu_do_batch(rdp);
                /* Re-invoke RCU core processing if there are callbacks remaining. */
-- 
2.40.1

