On Fri, 26 Feb 2016, Huang Rui wrote:
> +/* Event code: LSB 8 bits, passed in attr->config; any other bit is reserved. */
> +#define AMD_POWER_EVENT_MASK 0xFFULL
> +
> +#define MAX_CUS      8

What's that define for? Max compute units? So is that stuff eternally limited
to 8?

> +struct power_pmu_masks {
> +     /*
> +      * These two cpumasks are used for avoiding the allocations on the
> +      * CPU_STARTING phase because power_cpu_prepare() will be called with
> +      * IRQs disabled.
> +      */
> +     cpumask_var_t           mask;
> +     cpumask_var_t           tmp_mask;
> +};
> +
> +static struct pmu pmu_class;
> +
> +/*
> + * Accumulated power represents the sum of each compute unit's (CU) power
> + * consumption. On any core of each CU we read the total accumulated power from
> + * MSR_F15H_CU_PWR_ACCUMULATOR. cpu_mask represents CPU bit map of all cores
> + * which are picked to measure the power for the CUs they belong to.
> + */
> +static cpumask_t cpu_mask;
> +
> +static DEFINE_PER_CPU(struct power_pmu_masks *, amd_power_pmu);

This is complete overkill, really.

> +static int power_cpu_exit(int cpu)
> +{
> +     struct power_pmu_masks *pmu = per_cpu(amd_power_pmu, cpu);
> +     int target = nr_cpumask_bits;
> +     int ret = 0;
> +
> +     cpumask_copy(pmu->mask, topology_sibling_cpumask(cpu));
> +
> +     cpumask_clear_cpu(cpu, &cpu_mask);
> +     cpumask_clear_cpu(cpu, pmu->mask);
> +
> +     if (!cpumask_and(pmu->tmp_mask, pmu->mask, cpu_online_mask))
> +             goto out;
> +     /*
> +      * Find a new CPU on the same compute unit, if was set in cpumask
> +      * and still some CPUs on compute unit. Then move on to the new CPU.
> +      */
> +     target = cpumask_any(pmu->tmp_mask);
> +     if (target < nr_cpumask_bits && target != cpu)
> +             cpumask_set_cpu(target, &cpu_mask);
> +
> +     WARN_ON(cpumask_empty(&cpu_mask));
> +
> +out:
> +     /*
> +      * Migrate event and context to new CPU.
> +      */
> +     if (target < nr_cpumask_bits)
> +             perf_pmu_migrate_context(&pmu_class, cpu, target);
> +
> +     return ret;

Sigh. That whole thing can be simply done with:

        if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
                return 0;

        target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
        if (target < nr_cpumask_bits) {
                cpumask_set_cpu(target, &cpu_mask);
                perf_pmu_migrate_context(&pmu_class, cpu, target);
        }
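
For completeness, the whole exit path then reads something like this
(untested sketch, keeping the int return type of the original):

static int power_cpu_exit(int cpu)
{
	int target;

	/* Nothing to do if this CPU was not the designated reader */
	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
		return 0;

	/* Hand the duty over to any other sibling in the compute unit */
	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
	if (target < nr_cpumask_bits) {
		cpumask_set_cpu(target, &cpu_mask);
		perf_pmu_migrate_context(&pmu_class, cpu, target);
	}

	return 0;
}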


> +
> +}
> +
> +static int power_cpu_init(int cpu)
> +{
> +     struct power_pmu_masks *pmu = per_cpu(amd_power_pmu, cpu);
> +
> +     if (!pmu)
> +             return 0;
> +
> +     if (!cpumask_and(pmu->mask, topology_sibling_cpumask(cpu), &cpu_mask))
> +             cpumask_set_cpu(cpu, &cpu_mask);

And that's equally convoluted and can be done with:

        target = cpumask_any_and(&cpu_mask, topology_sibling_cpumask(cpu));
        if (target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cpu_mask);
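
So the init side collapses to (untested sketch):

static int power_cpu_init(int cpu)
{
	int target;

	/*
	 * If a sibling already covers this compute unit there is nothing
	 * to do, otherwise this CPU becomes the designated reader.
	 */
	target = cpumask_any_and(&cpu_mask, topology_sibling_cpumask(cpu));
	if (target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cpu_mask);

	return 0;
}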

> +static int power_cpu_prepare(int cpu)
> +{
> +     struct power_pmu_masks *pmu = per_cpu(amd_power_pmu, cpu);
> +     int phys_id = topology_physical_package_id(cpu);
> +     int ret = 0;
> +
> +     if (pmu)
> +             return 0;
> +
> +     if (phys_id < 0)
> +             return -EINVAL;
> +
> +     pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
> +     if (!pmu)
> +             return -ENOMEM;
> +
> +     if (!zalloc_cpumask_var(&pmu->mask, GFP_KERNEL)) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +
> +     if (!zalloc_cpumask_var(&pmu->tmp_mask, GFP_KERNEL)) {
> +             ret = -ENOMEM;
> +             goto out1;
> +     }
> +
> +     per_cpu(amd_power_pmu, cpu) = pmu;
> +     return 0;
> +
> +out1:
> +     free_cpumask_var(pmu->mask);
> +out:
> +     kfree(pmu);
> +
> +     return ret;
> +}
> +
> +static void power_cpu_kfree(int cpu)
> +{
> +     struct power_pmu_masks *pmu = per_cpu(amd_power_pmu, cpu);
> +
> +     if (!pmu)
> +             return;
> +
> +     free_cpumask_var(pmu->mask);
> +     free_cpumask_var(pmu->tmp_mask);
> +     kfree(pmu);
> +
> +     per_cpu(amd_power_pmu, cpu) = NULL;
> +}

Both prepare and free can go away.
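
With the allocations gone the notifier shrinks to the usual action switch.
A sketch (assuming the notifier in the patch follows the standard pattern;
it's not visible in this hunk):

static int
power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		power_cpu_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		power_cpu_exit(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}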

> +static int __init amd_power_pmu_init(void)
> +{
> +     int i, ret;
> +     u64 tmp;
> +
> +     if (!x86_match_cpu(cpu_match))
> +             return 0;
> +
> +     if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
> +             return -ENODEV;
> +
> +     cores_per_cu = amd_get_cores_per_cu();
> +     cu_num = boot_cpu_data.x86_max_cores / cores_per_cu;

Please use the new package management functions which are on the way to tip.

> +
> +     if (WARN_ON_ONCE(cu_num > MAX_CUS))
> +             return -EINVAL;
> +
> +     cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
> +
> +     if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
> +             pr_err("Failed to read max compute unit power accumulator 
> MSR\n");
> +             return -ENODEV;
> +     }
> +     max_cu_acc_power = tmp;
> +
> +     cpu_notifier_register_begin();
> +
> +     /* Choose one online core of each compute unit.  */
> +     for (i = 0; i < boot_cpu_data.x86_max_cores; i += cores_per_cu) {
> +             WARN_ON(cpumask_empty(topology_sibling_cpumask(i)));
> +             cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(i)), &cpu_mask);
> +     }



> +
> +     for_each_present_cpu(i) {
> +             ret = power_cpu_prepare(i);
> +             if (ret) {
> +                     /* Unwind on [0 ... i-1] CPUs. */
> +                     while (i--)
> +                             power_cpu_kfree(i);
> +                     goto out;
> +             }

                Not needed.

> +             ret = power_cpu_init(i);
> +             if (ret) {
> +                     /* Unwind on [0 ... i] CPUs. */
> +                     while (i >= 0)
> +                             power_cpu_kfree(i--);
> +                     goto out;
> +             }

You cannot issue the CPU_STARTING callback on CPUs which are present, but not online.
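
The STARTING work must be restricted to the CPUs which are actually online,
i.e. (sketch; the online mask is stable here because this runs inside
cpu_notifier_register_begin()):

	for_each_online_cpu(i)
		power_cpu_init(i);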

> +     }
> +
> +     __perf_cpu_notifier(power_cpu_notifier);
> +
> +     ret = perf_pmu_register(&pmu_class, "power", -1);
> +     if (WARN_ON(ret)) {
> +             pr_warn("AMD Power PMU registration failed\n");


So that leaves the notifier installed and leaks all the allocated memory.
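
If you register the notifier via an explicit notifier_block instead of the
__perf_cpu_notifier() convenience macro, the failure path can actually
unwind. Sketch (power_cpu_nb is a made-up name):

static struct notifier_block power_cpu_nb = {
	.notifier_call	= power_cpu_notifier,
	.priority	= CPU_PRI_PERF,
};
	...
	__register_cpu_notifier(&power_cpu_nb);

	ret = perf_pmu_register(&pmu_class, "power", -1);
	if (ret) {
		pr_warn("AMD Power PMU registration failed\n");
		__unregister_cpu_notifier(&power_cpu_nb);
	}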

> +             goto out;
> +     }
> +
> +     pr_info("AMD Power PMU detected, %d compute units\n", cu_num);
> +
> +out:
> +     cpu_notifier_register_done();
> +
> +     return ret;
> +}
> +device_initcall(amd_power_pmu_init);

Please make this a module right away. There is no reason to compile all
this in.
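
I.e. device_initcall() becomes module_init() and you add an exit path.
Something along these lines (untested; assumes the explicit notifier_block
from above):

static void __exit amd_power_pmu_exit(void)
{
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&power_cpu_nb);
	cpu_notifier_register_done();

	perf_pmu_unregister(&pmu_class);
}

module_init(amd_power_pmu_init);
module_exit(amd_power_pmu_exit);

MODULE_DESCRIPTION("AMD Processor Power Reporting Mechanism");
MODULE_LICENSE("GPL v2");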

Thanks,

        tglx

