Hi Rui,

On 6/26/2024 7:48 PM, Zhang, Rui wrote:
> On Mon, 2024-06-24 at 05:58 +0000, Dhananjay Ugwekar wrote:
>> After commit ("x86/cpu/topology: Add support for the AMD 0x80000026
>> leaf"), on AMD processors that support the extended CPUID leaf
>> 0x80000026, the topology_die_cpumask() and topology_logical_die_id()
>> macros no longer return the package cpumask and package id; instead
>> they return the CCD (Core Complex Die) mask and id respectively. This
>> leads to the energy-pkg event scope being changed to CCD instead of
>> package.
>>
>> Replacing these macros with their package counterparts fixes the
>> energy-pkg event for AMD CPUs.
>>
>> However, due to the difference between the scope of the energy-pkg
>> event for Intel and AMD CPUs, we have to replace these macros
>> conditionally, only for AMD CPUs.
>>
>> On a 12 CCD 1 Package AMD Zen4 Genoa machine:
>>
>> Before:
>> $ cat /sys/devices/power/cpumask
>> 0,8,16,24,32,40,48,56,64,72,80,88.
>>
>> The expected cpumask here is supposed to be just "0", as it is a
>> package scope event; only one CPU will be collecting the event for
>> all the CPUs in the package.
>>
>> After:
>> $ cat /sys/devices/power/cpumask
>> 0
>>
>> Signed-off-by: Dhananjay Ugwekar <dhananjay.ugwe...@amd.com>
>> Fixes: 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD
>> 0x80000026 leaf")
>
> As there is no code change compared with V1, I think you missed my
> Reviewed-by tag
> https://lore.kernel.org/all/e1f70a09f85dbd0ee3f32dffea37993e141269d0.ca...@intel.com/
Yes! I forgot to add your Reviewed-by tag, will add it in the next version.

Thanks,
Dhananjay

>
> thanks,
> rui
>
>> ---
>>  arch/x86/events/rapl.c | 30 ++++++++++++++++++++++++++----
>>  1 file changed, 26 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
>> index b985ca79cf97..73be25e1f4b4 100644
>> --- a/arch/x86/events/rapl.c
>> +++ b/arch/x86/events/rapl.c
>> @@ -103,6 +103,10 @@ static struct perf_pmu_events_attr event_attr_##v = {	\
>>  	.event_str	= str,					\
>>  };
>>
>> +#define rapl_pmu_is_pkg_scope()				\
>> +	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
>> +	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
>> +
>>  struct rapl_pmu {
>>  	raw_spinlock_t		lock;
>>  	int			n_active;
>> @@ -140,9 +144,21 @@ static unsigned int rapl_cntr_mask;
>>  static u64 rapl_timer_ms;
>>  static struct perf_msr *rapl_msrs;
>>
>> +static inline unsigned int get_rapl_pmu_idx(int cpu)
>> +{
>> +	return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
>> +					 topology_logical_die_id(cpu);
>> +}
>> +
>> +static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
>> +{
>> +	return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
>> +					 topology_die_cpumask(cpu);
>> +}
>> +
>>  static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
>>  {
>> -	unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
>> +	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
>>
>>  	/*
>>  	 * The unsigned check also catches the '-1' return value for non
>> @@ -543,6 +559,7 @@ static struct perf_msr amd_rapl_msrs[] = {
>>
>>  static int rapl_cpu_offline(unsigned int cpu)
>>  {
>> +	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
>>  	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
>>  	int target;
>>
>> @@ -552,7 +569,7 @@ static int rapl_cpu_offline(unsigned int cpu)
>>
>>  	pmu->cpu = -1;
>>  	/* Find a new cpu to collect rapl events */
>> -	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
>> +	target = cpumask_any_but(rapl_pmu_cpumask, cpu);
>>
>>  	/* Migrate rapl events to the new target */
>>  	if (target < nr_cpu_ids) {
>> @@ -565,6 +582,8 @@ static int rapl_cpu_offline(unsigned int cpu)
>>
>>  static int rapl_cpu_online(unsigned int cpu)
>>  {
>> +	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
>> +	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
>>  	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
>>  	int target;
>>
>> @@ -579,14 +598,14 @@ static int rapl_cpu_online(unsigned int cpu)
>>  		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
>>  		rapl_hrtimer_init(pmu);
>>
>> -		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
>> +		rapl_pmus->pmus[rapl_pmu_idx] = pmu;
>>  	}
>>
>>  	/*
>>  	 * Check if there is an online cpu in the package which collects rapl
>>  	 * events already.
>>  	 */
>> -	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
>> +	target = cpumask_any_and(&rapl_cpu_mask, rapl_pmu_cpumask);
>>  	if (target < nr_cpu_ids)
>>  		return 0;
>>
>> @@ -677,6 +696,9 @@ static int __init init_rapl_pmus(void)
>>  {
>>  	int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
>>
>> +	if (rapl_pmu_is_pkg_scope())
>> +		nr_rapl_pmu = topology_max_packages();
>> +
>>  	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
>>  	if (!rapl_pmus)
>>  		return -ENOMEM;
>
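[Editor's note] For readers skimming the thread without the full rapl.c in front of them, below is a condensed, standalone sketch of the scope-selection idea the patch introduces. It mirrors the rapl_pmu_is_pkg_scope()/get_rapl_pmu_idx() helpers from the diff above, but the vendor value and the topology_*() functions are made-up userspace stubs (not the kernel macros) so the snippet builds in isolation; it is an illustration of the approach, not the actual kernel code.

/*
 * Standalone sketch: on AMD/Hygon the energy-pkg RAPL counter is per
 * package, so the PMU index must come from the package id rather than
 * the (CCD) die id. All values below are illustrative stubs.
 */
#include <stdbool.h>
#include <stdio.h>

enum x86_vendor { X86_VENDOR_INTEL, X86_VENDOR_AMD, X86_VENDOR_HYGON };

/* Stub: pretend we booted on an AMD part with CPUID leaf 0x80000026. */
static enum x86_vendor boot_cpu_vendor = X86_VENDOR_AMD;

/* Mirrors the rapl_pmu_is_pkg_scope() macro from the patch. */
static bool rapl_pmu_is_pkg_scope(void)
{
	return boot_cpu_vendor == X86_VENDOR_AMD ||
	       boot_cpu_vendor == X86_VENDOR_HYGON;
}

/* Stubs standing in for the kernel's topology_logical_package_id()/
 * topology_logical_die_id(); the "8 CPUs per CCD" layout is invented. */
static unsigned int topology_logical_package_id(int cpu)
{
	(void)cpu;
	return 0;
}

static unsigned int topology_logical_die_id(int cpu)
{
	return cpu / 8;
}

/* Mirrors get_rapl_pmu_idx(): pick the index matching the event scope. */
static unsigned int get_rapl_pmu_idx(int cpu)
{
	return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
					 topology_logical_die_id(cpu);
}

int main(void)
{
	/* CPU 8 sits on CCD 1 in the stub topology, yet stays in package 0,
	 * so package scope yields index 0 while die scope would yield 1. */
	printf("rapl_pmu_idx for cpu 8: %u\n", get_rapl_pmu_idx(8));
	return 0;
}

The same idea is what collapses the sysfs cpumask in the commit message from one CPU per CCD ("0,8,16,...") to a single CPU per package ("0").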