Context:
The RCU Non-Callback (NOCB) infrastructure traditionally requires
boot-time parameters (e.g., rcu_nocbs) to allocate masks and spawn
management kthreads (rcuog/rcuo). This prevents systems from activating
offloading on-demand without a reboot.
Problem:
Dynamic Housekeeping & Enhanced Isolation (DHEI) requires CPUs to
transition to NOCB mode at runtime. Without boot-time setup, the
NOCB masks are unallocated, and critical kthreads are missing,
preventing effective tick suppression and isolation.
Solution:
Refactor RCU initialization to support dynamic on-demand setup.
- Introduce rcu_init_nocb_dynamic() to allocate masks and organize
kthreads if the system wasn't initially configured for NOCB.
- Update rcu_housekeeping_reconfigure() to iterate over CPUs and
perform safe offload/deoffload transitions via hotplug sequences
(cpu_down -> offload -> cpu_up).
- Remove __init from rcu_organize_nocb_kthreads to allow runtime
reconfiguration of the callback management hierarchy.
This enables a true "Zero-Conf" isolation experience where any CPU
can be fully isolated at runtime regardless of boot parameters.
---
kernel/rcu/rcu.h | 4 +++
kernel/rcu/tree.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++
kernel/rcu/tree.h | 2 +-
kernel/rcu/tree_nocb.h | 27 ++++++++++++------
4 files changed, 99 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 9cf01832a6c3d..fa9de9a3918b1 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -658,8 +658,12 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp);
#endif // #else // #ifdef CONFIG_TINY_SRCU
#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nocb_dynamic(void);
+void rcu_spawn_cpu_nocb_kthread(int cpu);
void rcu_bind_current_to_nocb(void);
#else
+static inline void rcu_init_nocb_dynamic(void) { }
+static inline void rcu_spawn_cpu_nocb_kthread(int cpu) { }
static inline void rcu_bind_current_to_nocb(void) { }
#endif
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 293bbd9ac3f4e..3fd12ac20957f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -48,6 +48,7 @@
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
+#include <linux/sched/isolation.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
@@ -4916,4 +4917,79 @@ void __init rcu_init(void)
#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_nocb.h"
+
+#ifdef CONFIG_SMP
+static int rcu_housekeeping_reconfigure(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct housekeeping_update *upd = data;
+ struct task_struct *t;
+ int cpu;
+
+ if (action != HK_UPDATE_MASK || upd->type != HK_TYPE_RCU)
+ return NOTIFY_OK;
+
+ rcu_init_nocb_dynamic();
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ bool isolated = !cpumask_test_cpu(cpu, upd->new_mask);
+ bool offloaded = rcu_rdp_is_offloaded(rdp);
+
+ if (isolated && !offloaded) {
+ /* Transition to NOCB */
+ pr_info("rcu: CPU %d transitioning to NOCB mode\n", cpu);
+ if (cpu_online(cpu)) {
+ remove_cpu(cpu);
+ rcu_spawn_cpu_nocb_kthread(cpu);
+ rcu_nocb_cpu_offload(cpu);
+ add_cpu(cpu);
+ } else {
+ rcu_spawn_cpu_nocb_kthread(cpu);
+ rcu_nocb_cpu_offload(cpu);
+ }
+ } else if (!isolated && offloaded) {
+ /* Transition to CB */
+ pr_info("rcu: CPU %d transitioning to CB mode\n", cpu);
+ if (cpu_online(cpu)) {
+ remove_cpu(cpu);
+ rcu_nocb_cpu_deoffload(cpu);
+ add_cpu(cpu);
+ } else {
+ rcu_nocb_cpu_deoffload(cpu);
+ }
+ }
+ }
+
+ t = READ_ONCE(rcu_state.gp_kthread);
+ if (t)
+ housekeeping_affine(t, HK_TYPE_RCU);
+
+#ifdef CONFIG_TASKS_RCU
+ t = get_rcu_tasks_gp_kthread();
+ if (t)
+ housekeeping_affine(t, HK_TYPE_RCU);
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+ t = get_rcu_tasks_rude_gp_kthread();
+ if (t)
+ housekeeping_affine(t, HK_TYPE_RCU);
+#endif
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rcu_housekeeping_nb = {
+ .notifier_call = rcu_housekeeping_reconfigure,
+};
+
+static int __init rcu_init_housekeeping_notifier(void)
+{
+ housekeeping_register_notifier(&rcu_housekeeping_nb);
+ return 0;
+}
+late_initcall(rcu_init_housekeeping_notifier);
+#endif
+
#include "tree_plugin.h"
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index b8bbe7960cda7..5322656a5a359 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -518,7 +518,7 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(void);
+static void rcu_organize_nocb_kthreads(void);
/*
* Disable IRQs before checking offloaded state so that local
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index e6cd56603cad4..9f5f446e70b3f 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1285,6 +1285,22 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
}
#endif // #ifdef CONFIG_RCU_LAZY
+void rcu_init_nocb_dynamic(void)
+{
+ if (rcu_state.nocb_is_setup)
+ return;
+
+ if (!cpumask_available(rcu_nocb_mask)) {
+ if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+ pr_info("rcu_nocb_mask allocation failed, dynamic offloading disabled.\n");
+ return;
+ }
+ }
+
+ rcu_state.nocb_is_setup = true;
+ rcu_organize_nocb_kthreads();
+}
+
void __init rcu_init_nohz(void)
{
int cpu;
@@ -1302,15 +1318,8 @@ void __init rcu_init_nohz(void)
cpumask = cpu_possible_mask;
if (cpumask) {
- if (!cpumask_available(rcu_nocb_mask)) {
- if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
- pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
- return;
- }
- }
-
+ rcu_init_nocb_dynamic();
cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
- rcu_state.nocb_is_setup = true;
}
if (!rcu_state.nocb_is_setup)
@@ -1442,7 +1451,7 @@ module_param(rcu_nocb_gp_stride, int, 0444);
/*
* Initialize GP-CB relationships for all no-CBs CPU.
*/
-static void __init rcu_organize_nocb_kthreads(void)
+static void rcu_organize_nocb_kthreads(void)
{
int cpu;
bool firsttime = true;
--
2.43.0