We can make use of the rcu_nocb_cpu_offload()/rcu_nocb_cpu_deoffload() APIs to enable RCU NO-CB CPU offloading of newly isolated CPUs and deoffloading of de-isolated CPUs.
Add a new rcu_nocb_update_cpus() helper to do that and call it directly from housekeeping_update() when the HK_TYPE_KERNEL_NOISE cpumask is being changed. This dynamic RCU NO-CB CPU offloading feature can only be used if either the "rcu_nocbs" or the "nohz_full" boot command parameter is used (with or without an argument) so that the necessary RCU NO-CB resources are properly initialized at boot time. Signed-off-by: Waiman Long <[email protected]> --- include/linux/rcupdate.h | 2 ++ kernel/rcu/tree_nocb.h | 22 ++++++++++++++++++++++ kernel/sched/isolation.c | 4 +++- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 04f3f86a4145..987e3d1d413e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -150,6 +150,7 @@ void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); +void rcu_nocb_update_cpus(struct cpumask *cpumask); #define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s) @@ -159,6 +160,7 @@ static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } +static inline void rcu_nocb_update_cpus(struct cpumask *cpumask) { } #define RCU_NOCB_LOCKDEP_WARN(c, s) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 2d06dcb61f37..b2daba1e5cb9 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1173,6 +1173,28 @@ int rcu_nocb_cpu_offload(int cpu) } EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload); +void rcu_nocb_update_cpus(struct cpumask *cpumask) +{ + int cpu, ret; + + if (!rcu_state.nocb_is_setup) { + pr_warn_once("Dynamic RCU NOCB cannot be enabled without nohz_full/rcu_nocbs kernel boot parameter!\n"); + return; + } + + for_each_cpu_andnot(cpu, cpumask, rcu_nocb_mask) { + ret = rcu_nocb_cpu_offload(cpu); + if (WARN_ON_ONCE(ret)) + return; + } + + for_each_cpu_andnot(cpu, rcu_nocb_mask, cpumask) { + ret = rcu_nocb_cpu_deoffload(cpu); + if (WARN_ON_ONCE(ret)) + return; + } +} + #ifdef CONFIG_RCU_LAZY static unsigned long lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 48b155e0b290..b5635484ec69 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -181,8 +181,10 @@ int housekeeping_update(struct cpumask *isol_mask, unsigned long flags) if ((housekeeping.flags & flags) != flags) WRITE_ONCE(housekeeping.flags, housekeeping.flags | flags); - if (flags & HK_FLAG_KERNEL_NOISE) + if (flags & HK_FLAG_KERNEL_NOISE) { tick_nohz_full_update_cpus(isol_mask); + rcu_nocb_update_cpus(isol_mask); + } synchronize_rcu(); -- 2.53.0

