Replace manual cpufreq_cpu_put() calls with the __free(put_cpufreq_policy) annotation for policy references, and split the per-CPU body of update_qos_request() into a new helper, intel_pstate_cpufreq_update_limits(), so that each policy reference is released automatically when it goes out of scope. This reduces the risk of reference counting mistakes and aligns the code with current kernel style.
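For reference, a minimal sketch of the pattern (illustrative only, not code from this patch; the helper name below is made up, and it assumes the put_cpufreq_policy cleanup class that this conversion relies on):

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/* Hypothetical helper, for illustration only. */
static bool example_policy_is_active(int cpu)
{
	/*
	 * The reference taken by cpufreq_cpu_get() is dropped
	 * automatically when 'policy' goes out of scope, including
	 * on early returns, so no explicit cpufreq_cpu_put() is
	 * needed on any exit path.
	 */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return false;

	/* ... use 'policy' here ... */

	return true;
}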
No functional change intended.

Signed-off-by: Zihuan Zhang <zhangzih...@kylinos.cn>
Reviewed-by: Jonathan Cameron <jonathan.came...@huawei.com>
---
 drivers/cpufreq/intel_pstate.c | 59 +++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f366d35c5840..0b54e08f9447 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1502,9 +1502,8 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
 
 static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
 {
-	struct cpufreq_policy *policy __free(put_cpufreq_policy);
+	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
 
-	policy = cpufreq_cpu_get(cpudata->cpu);
 	if (!policy)
 		return false;
 
@@ -1695,41 +1694,49 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 	return count;
 }
 
-static void update_qos_request(enum freq_qos_req_type type)
+static bool intel_pstate_cpufreq_update_limits(int cpu, enum freq_qos_req_type type)
 {
 	struct freq_qos_request *req;
-	struct cpufreq_policy *policy;
-	int i;
+	unsigned int freq, perf_pct;
+	struct cpudata *data = all_cpu_data[cpu];
+	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
 
-	for_each_possible_cpu(i) {
-		struct cpudata *cpu = all_cpu_data[i];
-		unsigned int freq, perf_pct;
+	if (!policy)
+		return false;
 
-		policy = cpufreq_cpu_get(i);
-		if (!policy)
-			continue;
+	req = policy->driver_data;
 
-		req = policy->driver_data;
-		cpufreq_cpu_put(policy);
+	if (!req)
+		return false;
 
-		if (!req)
-			continue;
+	if (hwp_active)
+		intel_pstate_get_hwp_cap(data);
 
-		if (hwp_active)
-			intel_pstate_get_hwp_cap(cpu);
+	if (type == FREQ_QOS_MIN) {
+		perf_pct = global.min_perf_pct;
+	} else {
+		req++;
+		perf_pct = global.max_perf_pct;
+	}
 
-		if (type == FREQ_QOS_MIN) {
-			perf_pct = global.min_perf_pct;
-		} else {
-			req++;
-			perf_pct = global.max_perf_pct;
-		}
+	freq = DIV_ROUND_UP(data->pstate.turbo_freq * perf_pct, 100);
 
-		freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+	if (freq_qos_update_request(req, freq) < 0)
+		pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
 
-		if (freq_qos_update_request(req, freq) < 0)
-			pr_warn("Failed to update freq constraint: CPU%d\n", i);
+	return true;
+}
+
+
+static void update_qos_request(enum freq_qos_req_type type)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		if (!intel_pstate_cpufreq_update_limits(i, type))
+			continue;
 	}
+
 }
 
 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
-- 
2.25.1