Since the HK_TYPE_TICK cpumask is going to become changeable at run time, use RCU to protect access to the cpumask so that it cannot go away in the middle of an operation.
Signed-off-by: Waiman Long <[email protected]>
---
 arch/arm64/kernel/topology.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index b32f13358fbb..48f150801689 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -173,6 +173,7 @@ void arch_cpu_idle_enter(void)
 	if (!amu_fie_cpu_supported(cpu))
 		return;
 
+	guard(rcu)();
 	/* Kick in AMU update but only if one has not happened already */
 	if (housekeeping_cpu(cpu, HK_TYPE_TICK) &&
 	    time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
@@ -187,11 +188,16 @@ int arch_freq_get_on_cpu(int cpu)
 	unsigned int start_cpu = cpu;
 	unsigned long last_update;
 	unsigned int freq = 0;
+	bool hk_cpu;
 	u64 scale;
 
 	if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
 		return -EOPNOTSUPP;
 
+	scoped_guard(rcu) {
+		hk_cpu = housekeeping_cpu(cpu, HK_TYPE_TICK);
+	}
+
 	while (1) {
 		amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
 
@@ -204,16 +210,21 @@ int arch_freq_get_on_cpu(int cpu)
 	 * (and thus freq scale), if available, for given policy: this boils
 	 * down to identifying an active cpu within the same freq domain, if any.
 	 */
-	if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
+	if (!hk_cpu ||
 	    time_is_before_jiffies(last_update + msecs_to_jiffies(AMU_SAMPLE_EXP_MS))) {
 		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+		bool hk_intersects;
 		int ref_cpu;
 
 		if (!policy)
 			return -EINVAL;
 
-		if (!cpumask_intersects(policy->related_cpus,
-					housekeeping_cpumask(HK_TYPE_TICK))) {
+		scoped_guard(rcu) {
+			hk_intersects = cpumask_intersects(policy->related_cpus,
+							   housekeeping_cpumask(HK_TYPE_TICK));
+		}
+
+		if (!hk_intersects) {
 			cpufreq_cpu_put(policy);
 			return -EOPNOTSUPP;
 		}
-- 
2.53.0

