On Tue, Jun 10, 2014 at 05:37:09PM +0800, Zhouyi Zhou wrote:
> This is version 2.0 of "[PATCH 1/1] perf/amd: NULL return of kzalloc_node 
> should be handled"
> (http://www.spinics.net/lists/kernel/msg1760689.html),
> 
> Correctly handle memory allocation failure in perf_event_amd_uncore.c
> 
> Signed-off-by: Zhouyi Zhou <yizhouz...@ict.ac.cn>
> ---
>  arch/x86/kernel/cpu/perf_event_amd_uncore.c |   36 ++++++++++++++++++++++++---
>  1 file changed, 33 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
> index 3bbdf4c..bdc8e49 100644
> --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
> +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
> @@ -294,12 +294,14 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
>                       cpu_to_node(cpu));
>  }
>  
> -static void amd_uncore_cpu_up_prepare(unsigned int cpu)
> +static int amd_uncore_cpu_up_prepare(unsigned int cpu)
>  {
>       struct amd_uncore *uncore;
>  
>       if (amd_uncore_nb) {
>               uncore = amd_uncore_alloc(cpu);
> +             if (!uncore)
> +                     return 1;
>               uncore->cpu = cpu;
>               uncore->num_counters = NUM_COUNTERS_NB;
>               uncore->rdpmc_base = RDPMC_BASE_NB;
> @@ -311,6 +313,11 @@ static void amd_uncore_cpu_up_prepare(unsigned int cpu)
>  
>       if (amd_uncore_l2) {
>               uncore = amd_uncore_alloc(cpu);
> +             if (!uncore) {
> +                     if (amd_uncore_nb)
> +                             kfree(*per_cpu_ptr(amd_uncore_nb, cpu));
> +                     return 1;
> +             }
>               uncore->cpu = cpu;
>               uncore->num_counters = NUM_COUNTERS_L2;
>               uncore->rdpmc_base = RDPMC_BASE_L2;
> @@ -319,6 +326,8 @@ static void amd_uncore_cpu_up_prepare(unsigned int cpu)
>               uncore->pmu = &amd_l2_pmu;
>               *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
>       }
> +
> +     return 0;
>  }

Maybe return -ENOMEM here already.
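
Something like the below perhaps; an untested sketch of the same hunk,
with the field setup elided and only the failure paths spelled out:

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_l2;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        return -ENOMEM;
                /* ... set up the NB fields exactly as in the patch ... */
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_l2) {
                uncore_l2 = amd_uncore_alloc(cpu);
                if (!uncore_l2) {
                        if (uncore_nb) {
                                /* also clear the slot we just filled */
                                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
                                kfree(uncore_nb);
                        }
                        return -ENOMEM;
                }
                /* ... set up the L2 fields exactly as in the patch ... */
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }

        return 0;
}

Keeping the local pointer around also lets you clear the per-cpu slot
again, so nothing else trips over a freed pointer later.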

>  static struct amd_uncore *
> @@ -461,7 +470,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
>  
>       switch (action & ~CPU_TASKS_FROZEN) {
>       case CPU_UP_PREPARE:
> -             amd_uncore_cpu_up_prepare(cpu);
> +             if (amd_uncore_cpu_up_prepare(cpu))
> +                     return notifier_from_errno(-ENOMEM);
>               break;
>  
>       case CPU_STARTING:
> @@ -514,6 +524,8 @@ static int __init amd_uncore_init(void)
>  
>       if (cpu_has_perfctr_nb) {
>               amd_uncore_nb = alloc_percpu(struct amd_uncore *);
> +             if (!amd_uncore_nb)
> +                     return -ENOMEM;
>               perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
>  
>               printk(KERN_INFO "perf: AMD NB counters detected\n");
> @@ -522,6 +534,13 @@ static int __init amd_uncore_init(void)
>  
>       if (cpu_has_perfctr_l2) {
>               amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
> +             if (!amd_uncore_l2) {
> +                     if (cpu_has_perfctr_nb) {
> +                             perf_pmu_unregister(&amd_nb_pmu);

Combined with the below (the extra for_each_online_cpu() loop) you might
want to consider the 'normal' goto error-handling chain, e.g.:

err_online:
        for_each_online_cpu(cpu2) {
                if (cpu2 == cpu)
                        break;

                /* cleanup cpu2 */
        }
err_l2:
        perf_pmu_unregister(nb);
        free_percpu(nb);
err_nb:
        return ret;
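
For the NB side in amd_uncore_init() that would look something like the
below; a sketch only, assuming 'ret' is declared at the top and the
perf_pmu_register() return value is checked instead of being ignored:

        if (cpu_has_perfctr_nb) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto err_nb;
                }

                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret) {
                        free_percpu(amd_uncore_nb);
                        goto err_nb;
                }

                printk(KERN_INFO "perf: AMD NB counters detected\n");
        }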
        

> +                             free_percpu(amd_uncore_nb);
> +                     }
> +                     return -ENOMEM;
> +             }
>               perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);

Then you can propagate the error here, instead of assuming -ENOMEM.
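
E.g. (again only a sketch; err_l2 as in the chain above, which would need
to check that the NB side was actually set up before undoing it):

                ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
                if (ret) {
                        free_percpu(amd_uncore_l2);
                        goto err_l2;
                }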

>               printk(KERN_INFO "perf: AMD L2I counters detected\n");
> @@ -535,7 +554,18 @@ static int __init amd_uncore_init(void)
>  
>       /* init cpus already online before registering for hotplug notifier */
>       for_each_online_cpu(cpu) {
> -             amd_uncore_cpu_up_prepare(cpu);
> +             if (amd_uncore_cpu_up_prepare(cpu)) {
> +                     if (cpu_has_perfctr_nb) {
> +                             perf_pmu_unregister(&amd_nb_pmu);
> +                             free_percpu(amd_uncore_nb);
> +                     }
> +                     if (cpu_has_perfctr_l2) {
> +                             perf_pmu_unregister(&amd_l2_pmu);
> +                             free_percpu(amd_uncore_l2);
> +                     }
> +                     cpu_notifier_register_done();
> +                     return -ENOMEM;
> +             }
>               smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
>       }

I think you want a second for_each_online_cpu() loop that undoes the work
for the already-prepared CPUs before calling unregister etc., as sketched
below.
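
Roughly like this (sketch; since init_cpu_already_online() has already run
for those CPUs, the cleanup may in fact have to mirror the hot-unplug path
rather than being a bare kfree()):

        for_each_online_cpu(cpu2) {
                if (cpu2 == cpu)
                        break;
                /* free what amd_uncore_cpu_up_prepare() set up for cpu2 */
                if (amd_uncore_nb)
                        kfree(*per_cpu_ptr(amd_uncore_nb, cpu2));
                if (amd_uncore_l2)
                        kfree(*per_cpu_ptr(amd_uncore_l2, cpu2));
        }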

