On Wed, Nov 08, 2017 at 11:47:20AM -0800, Dave Hansen wrote:
> +static
> +DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
> +
>  /* The size of a BTS record in bytes: */
>  #define BTS_RECORD_SIZE              24
>  
> @@ -278,6 +282,39 @@ void fini_debug_store_on_cpu(int cpu)
>  
>  static DEFINE_PER_CPU(void *, insn_buffer);
>  
> +static void *dsalloc(size_t size, gfp_t flags, int node)
> +{
> +#ifdef CONFIG_KAISER
> +     unsigned int order = get_order(size);
> +     struct page *page;
> +     unsigned long addr;
> +
> +     page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
> +     if (!page)
> +             return NULL;
> +     addr = (unsigned long)page_address(page);
> +     if (kaiser_add_mapping(addr, size, __PAGE_KERNEL | _PAGE_GLOBAL) < 0) {
> +             __free_pages(page, order);
> +             addr = 0;
> +     }
> +     return (void *)addr;
> +#else
> +     return kmalloc_node(size, flags | __GFP_ZERO, node);
> +#endif
> +}
> +
> +static void dsfree(const void *buffer, size_t size)
> +{
> +#ifdef CONFIG_KAISER
> +     if (!buffer)
> +             return;
> +     kaiser_remove_mapping((unsigned long)buffer, size);
> +     free_pages((unsigned long)buffer, get_order(size));
> +#else
> +     kfree(buffer);
> +#endif
> +}

You might as well use __alloc_pages_node() / free_pages()
unconditionally. Those buffers are at least one page in size.

That should also get rid of the #ifdef muck.
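
Something like the below, perhaps (untested sketch; it assumes
kaiser_add_mapping() / kaiser_remove_mapping() turn into no-op stubs
for !CONFIG_KAISER, as the rest of the series appears to arrange):

	static void *dsalloc(size_t size, gfp_t flags, int node)
	{
		unsigned int order = get_order(size);
		struct page *page;
		unsigned long addr;

		/*
		 * These buffers are at least one page, so use the page
		 * allocator unconditionally.
		 */
		page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
		if (!page)
			return NULL;
		addr = (unsigned long)page_address(page);
		if (kaiser_add_mapping(addr, size, __PAGE_KERNEL | _PAGE_GLOBAL) < 0) {
			__free_pages(page, order);
			return NULL;
		}
		return (void *)addr;
	}

	static void dsfree(const void *buffer, size_t size)
	{
		if (!buffer)
			return;
		kaiser_remove_mapping((unsigned long)buffer, size);
		free_pages((unsigned long)buffer, get_order(size));
	}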

>  static int alloc_ds_buffer(int cpu)
>  {
> -     int node = cpu_to_node(cpu);
> -     struct debug_store *ds;
> -
> -     ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
> -     if (unlikely(!ds))
> -             return -ENOMEM;
> +     struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
>  
> +     memset(ds, 0, sizeof(*ds));

Why the memset()? Isn't static per-cpu memory zero-initialized?
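
(IIRC it is: uninitialized DEFINE_PER_CPU variables end up zero-filled
in the .data..percpu template, and each CPU's area is copied from that
template at boot, i.e. roughly:

	static DEFINE_PER_CPU(struct debug_store, cpu_debug_store);

	/* on first use, every field already reads as zero: */
	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);

so the memset() looks redundant for the initial allocation, at least.)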
