On Thu, 2020-10-01 at 09:56 -0500, Gustavo A. R. Silva wrote:
> There is a regular need in the kernel to provide a way to declare
> a dynamically sized set of trailing elements in a structure. Kernel code
> should always use "flexible array members"[1] for these cases. The older
> style of one-element or zero-length arrays should no longer be used[2].
> 
> struct uv_rtc_timer_head contains a one-element array cpu[1].
> 
> Switch it to a flexible array and use the struct_size() helper to
> calculate the allocation size. Also, save some heap space in the
> process[3].

trivia:

> diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
[]
> @@ -148,9 +148,8 @@ static __init int uv_rtc_allocate_timers(void)
>               struct uv_rtc_timer_head *head = blade_info[bid];
>  
>               if (!head) {
> -                     head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
> -                             (uv_blade_nr_possible_cpus(bid) *
> -                                     2 * sizeof(u64)),
> +                     head = kmalloc_node(struct_size(head, cpu,
> +                             uv_blade_nr_possible_cpus(bid)),
>                               GFP_KERNEL, nid);
>                       if (!head) {
>                               uv_rtc_deallocate_timers();

Maybe save the value of uv_blade_nr_possible_cpus(bid) in a local
to reduce duplication and make the struct_size() call more
readable?

                if (!head) {
                        int ncpus = uv_blade_nr_possible_cpus(bid);

                        head = kmalloc_node(struct_size(head, cpu, ncpus),
                                            GFP_KERNEL, nid);
                        if (!head) {
                                uv_rtc_deallocate_timers();
                                return -ENOMEM;
                        }
                        spin_lock_init(&head->lock);
                        head->ncpus = ncpus;
                        head->next_cpu = -1;
                        blade_info[bid] = head;
                }
