> -----Original Message-----
> From: Jason Wang [mailto:[email protected]]
> Sent: Tuesday, June 18, 2013 11:28 PM
> To: KY Srinivasan; Haiyang Zhang; [email protected]; linux-
> [email protected]; [email protected]
> Cc: Jason Wang
> Subject: [PATCH] drivers: hv: allocate synic structures before hv_synic_init()
> 
> We currently allocate synic structures in hv_synic_init(), but there's no
> way for the driver to know about the allocation failure and it may continue
> to use the
> uninitialized pointers. Solve this by introducing helpers for allocating and
> freeing and doing the allocation before the on_each_cpu() call in
> vmbus_bus_init().
> 
> Cc: K. Y. Srinivasan <[email protected]>
> Cc: Haiyang Zhang <[email protected]>
> Signed-off-by: Jason Wang <[email protected]>
Signed-off-by: K. Y. Srinivasan <[email protected]>

> ---
>  drivers/hv/hv.c           |   85 ++++++++++++++++++++++++++++-----------------
>  drivers/hv/hyperv_vmbus.h |    4 ++
>  drivers/hv/vmbus_drv.c    |    8 +++-
>  3 files changed, 63 insertions(+), 34 deletions(-)
> 
> diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
> index 3f88681..0039373 100644
> --- a/drivers/hv/hv.c
> +++ b/drivers/hv/hv.c
> @@ -265,6 +265,59 @@ u16 hv_signal_event(void *con_id)
>       return status;
>  }
> 
> +
> +int hv_synic_alloc(void)
> +{
> +     size_t size = sizeof(struct tasklet_struct);
> +     int cpu;
> +
> +     for_each_online_cpu(cpu) {
> +             hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
> +             if (hv_context.event_dpc[cpu] == NULL) {
> +                     pr_err("Unable to allocate event dpc\n");
> +                     goto err;
> +             }
> +             tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
> +
> +             hv_context.synic_message_page[cpu] =
> +                     (void *)get_zeroed_page(GFP_ATOMIC);
> +
> +             if (hv_context.synic_message_page[cpu] == NULL) {
> +                     pr_err("Unable to allocate SYNIC message page\n");
> +                     goto err;
> +             }
> +
> +             hv_context.synic_event_page[cpu] =
> +                     (void *)get_zeroed_page(GFP_ATOMIC);
> +
> +             if (hv_context.synic_event_page[cpu] == NULL) {
> +                     pr_err("Unable to allocate SYNIC event page\n");
> +                     goto err;
> +             }
> +     }
> +
> +     return 0;
> +err:
> +     return -ENOMEM;
> +}
> +
> +void hv_synic_free_cpu(int cpu)
> +{
> +     kfree(hv_context.event_dpc[cpu]);
> +     if (hv_context.synic_event_page[cpu])
> +             free_page((unsigned long)hv_context.synic_event_page[cpu]);
> +     if (hv_context.synic_message_page[cpu])
> +             free_page((unsigned long)hv_context.synic_message_page[cpu]);
> +}
> +
> +void hv_synic_free(void)
> +{
> +     int cpu;
> +
> +     for_each_online_cpu(cpu)
> +             hv_synic_free_cpu(cpu);
> +}
> +
>  /*
>   * hv_synic_init - Initialize the Synthethic Interrupt Controller.
>   *
> @@ -289,30 +342,6 @@ void hv_synic_init(void *arg)
>       /* Check the version */
>       rdmsrl(HV_X64_MSR_SVERSION, version);
> 
> -     hv_context.event_dpc[cpu] = kmalloc(sizeof(struct tasklet_struct),
> -                                         GFP_ATOMIC);
> -     if (hv_context.event_dpc[cpu] == NULL) {
> -             pr_err("Unable to allocate event dpc\n");
> -             goto cleanup;
> -     }
> -     tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
> -
> -     hv_context.synic_message_page[cpu] =
> -             (void *)get_zeroed_page(GFP_ATOMIC);
> -
> -     if (hv_context.synic_message_page[cpu] == NULL) {
> -             pr_err("Unable to allocate SYNIC message page\n");
> -             goto cleanup;
> -     }
> -
> -     hv_context.synic_event_page[cpu] =
> -             (void *)get_zeroed_page(GFP_ATOMIC);
> -
> -     if (hv_context.synic_event_page[cpu] == NULL) {
> -             pr_err("Unable to allocate SYNIC event page\n");
> -             goto cleanup;
> -     }
> -
>       /* Setup the Synic's message page */
>       rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
>       simp.simp_enabled = 1;
> @@ -355,14 +384,6 @@ void hv_synic_init(void *arg)
>       rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
>       hv_context.vp_index[cpu] = (u32)vp_index;
>       return;
> -
> -cleanup:
> -     if (hv_context.synic_event_page[cpu])
> -             free_page((unsigned long)hv_context.synic_event_page[cpu]);
> -
> -     if (hv_context.synic_message_page[cpu])
> -             free_page((unsigned long)hv_context.synic_message_page[cpu]);
> -     return;
>  }
> 
>  /*
> diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
> index 12f2f9e..d84918f 100644
> --- a/drivers/hv/hyperv_vmbus.h
> +++ b/drivers/hv/hyperv_vmbus.h
> @@ -527,6 +527,10 @@ extern int hv_post_message(union hv_connection_id
> connection_id,
> 
>  extern u16 hv_signal_event(void *con_id);
> 
> +extern int hv_synic_alloc(void);
> +
> +extern void hv_synic_free(void);
> +
>  extern void hv_synic_init(void *irqarg);
> 
>  extern void hv_synic_cleanup(void *arg);
> diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
> index 4004e54..a2464bf 100644
> --- a/drivers/hv/vmbus_drv.c
> +++ b/drivers/hv/vmbus_drv.c
> @@ -563,6 +563,9 @@ static int vmbus_bus_init(int irq)
>        */
>       hv_register_vmbus_handler(irq, vmbus_isr);
> 
> +     ret = hv_synic_alloc();
> +     if (ret)
> +             goto err_alloc;
>       /*
>        * Initialize the per-cpu interrupt state and
>        * connect to the host.
> @@ -570,13 +573,14 @@ static int vmbus_bus_init(int irq)
>       on_each_cpu(hv_synic_init, NULL, 1);
>       ret = vmbus_connect();
>       if (ret)
> -             goto err_irq;
> +             goto err_alloc;
> 
>       vmbus_request_offers();
> 
>       return 0;
> 
> -err_irq:
> +err_alloc:
> +     hv_synic_free();
>       free_irq(irq, hv_acpi_dev);
> 
>  err_unregister:
> --
> 1.7.1
> 
> 


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to