During the offloading or de-offloading process, make sure the callback
batch is still processed locally whenever the segcblist isn't entirely
offloaded. This enforces callback servicing while the CPU is still in an
intermediate (de-)offloading state.
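
For illustration only, here is a tiny userspace model of the check this
patch introduces (the struct and flag values below are made up for the
sketch; only the relationship between the two predicates mirrors the real
helpers): while (de-)offloading is in flight, only part of the SEGCBLIST_*
flags are set, so the "completely offloaded" test fails and rcu_core()
keeps batching callbacks locally.

/*
 * Standalone model, not kernel code. Flag values are illustrative; what
 * matters is that "offloaded" needs only SEGCBLIST_OFFLOADED, while
 * "completely offloaded" additionally requires both kthread flags.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEGCBLIST_OFFLOADED	(1 << 0)
#define SEGCBLIST_KTHREAD_CB	(1 << 1)
#define SEGCBLIST_KTHREAD_GP	(1 << 2)

struct segcblist_model {
	int flags;
};

static bool model_is_offloaded(const struct segcblist_model *m)
{
	return m->flags & SEGCBLIST_OFFLOADED;
}

static bool model_completely_offloaded(const struct segcblist_model *m)
{
	int all = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

	return (m->flags & all) == all;
}

int main(void)
{
	/* Intermediate (de-)offloading state: kthreads not (yet) owning the CBs. */
	struct segcblist_model intermediate = { .flags = SEGCBLIST_OFFLOADED };
	/* Fully offloaded: kthreads handle both callbacks and grace periods. */
	struct segcblist_model complete = {
		.flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP,
	};

	/* In the first case, rcu_core() would still invoke rcu_do_batch(). */
	printf("intermediate: offloaded=%d completely=%d -> do_batch=%d\n",
	       model_is_offloaded(&intermediate),
	       model_completely_offloaded(&intermediate),
	       !model_completely_offloaded(&intermediate));
	printf("complete:     offloaded=%d completely=%d -> do_batch=%d\n",
	       model_is_offloaded(&complete),
	       model_completely_offloaded(&complete),
	       !model_completely_offloaded(&complete));
	return 0;
}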

FIXME: Note that __call_rcu_core() isn't called during these intermediate
states. Some pieces of it may still be necessary.

Inspired-by: Paul E. McKenney <paul...@kernel.org>
Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Paul E. McKenney <paul...@kernel.org>
Cc: Josh Triplett <j...@joshtriplett.org>
Cc: Steven Rostedt <rost...@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
Cc: Lai Jiangshan <jiangshan...@gmail.com>
Cc: Joel Fernandes <j...@joelfernandes.org>
Cc: Neeraj Upadhyay <neer...@codeaurora.org>
---
 kernel/rcu/rcu_segcblist.h | 12 ++++++++++++
 kernel/rcu/tree.c          |  3 ++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 00ebeb8d39b7..f7da3d535888 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -92,6 +92,18 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
        return false;
 }
 
+static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
+{
+       int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
+
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
+               if ((rsclp->flags & flags) == flags)
+                       return true;
+       }
+
+       return false;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 35834ce2d042..45fad6977bea 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2674,6 +2674,7 @@ static __latent_entropy void rcu_core(void)
        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
 
        if (cpu_is_offline(smp_processor_id()))
                return;
@@ -2703,7 +2704,7 @@ static __latent_entropy void rcu_core(void)
        rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
        /* If there are callbacks ready, invoke them. */
-       if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+       if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
            likely(READ_ONCE(rcu_scheduler_fully_active)))
                rcu_do_batch(rdp);
 
-- 
2.25.1
