Provide rcu_nocb_cpu_offload(), which re-enables callback offloading on a
given CPU at runtime. This is essentially the reverse operation of
de-offloading. For now it is only supported on CPUs that used to be
offloaded and therefore still have the relevant nocb_cb/nocb_gp kthreads
around.
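
For illustration only (this snippet is not part of the patch), a caller
could toggle a CPU between the two modes roughly as follows, assuming the
CPU was offloaded at boot so its nocb kthreads still exist. Both functions
acquire rcu_state.barrier_mutex and cpus_read_lock(), so they must be
called from a context that may sleep:

	#include <linux/rcupdate.h>

	/*
	 * Hypothetical helper, for illustration: de-offload CPU 3 and then
	 * re-offload it.  Must be called from a sleepable context.
	 */
	static void example_toggle_cpu3_offloading(void)
	{
		rcu_nocb_cpu_deoffload(3);	/* callbacks handled in softirq again */
		rcu_nocb_cpu_offload(3);	/* callbacks handed back to the nocb kthreads */
	}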

Inspired-by: Paul E. McKenney <paul...@kernel.org>
Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Paul E. McKenney <paul...@kernel.org>
Cc: Josh Triplett <j...@joshtriplett.org>
Cc: Steven Rostedt <rost...@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
Cc: Lai Jiangshan <jiangshan...@gmail.com>
Cc: Joel Fernandes <j...@joelfernandes.org>
---
 include/linux/rcupdate.h |  2 ++
 kernel/rcu/tree_plugin.h | 44 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1d3a4c37c3c1..ee95e49d675f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -96,9 +96,11 @@ static inline void rcu_user_exit(void) { }
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
+void rcu_nocb_cpu_offload(int cpu);
 void rcu_nocb_cpu_deoffload(int cpu);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_init_nohz(void) { }
+static inline void rcu_nocb_cpu_offload(int cpu) { }
 static inline void rcu_nocb_cpu_deoffload(int cpu) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c74a4df8d5f2..ae4b5e9f2fc5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2224,6 +2224,50 @@ void rcu_nocb_cpu_deoffload(int cpu)
        mutex_unlock(&rcu_state.barrier_mutex);
 }
 
+static void __rcu_nocb_rdp_offload(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_node *rnp = rdp->mynode;
+
+       pr_info("Offloading %d\n", rdp->cpu);
+
+       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+       raw_spin_lock_rcu_node(rnp);
+       rcu_segcblist_offload(&rdp->cblist, true);
+       raw_spin_unlock_rcu_node(rnp);
+       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+       kthread_unpark(rdp->nocb_cb_kthread);
+}
+
+static long rcu_nocb_rdp_offload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+       __rcu_nocb_rdp_offload(rdp);
+
+       return 0;
+}
+
+void rcu_nocb_cpu_offload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (!rcu_segcblist_is_offloaded(&rdp->cblist) && rdp->nocb_cb_kthread) {
+               if (cpu_online(cpu)) {
+                       work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+               } else {
+                       __rcu_nocb_rdp_offload(rdp);
+               }
+               cpumask_set_cpu(cpu, rcu_nocb_mask);
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+}
+
 void __init rcu_init_nohz(void)
 {
        int cpu;
-- 
2.25.0
