>  static void *__vmalloc_node(unsigned long size, unsigned long align,
> -                         gfp_t gfp_mask, pgprot_t prot,
> -                         int node, const void *caller);
> +                     gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags,
> +                     int node, const void *caller);
>  static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
> -                              pgprot_t prot, int node)
> +                              pgprot_t prot, unsigned int page_shift,
> +                              int node)
>  {
>       struct page **pages;
> +     unsigned long addr = (unsigned long)area->addr;
> +     unsigned long size = get_vm_area_size(area);
> +     unsigned int page_order = page_shift - PAGE_SHIFT;
>       unsigned int nr_pages, array_size, i;
>       const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
>       const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
>       const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
> -                                     0 :
> -                                     __GFP_HIGHMEM;
> +                                     0 : __GFP_HIGHMEM;
>  
> -     nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
> +     nr_pages = size >> page_shift;

While trying out this patchset, we encountered a BUG_ON in account_kernel_stack()
in kernel/fork.c:

BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

This check obviously needs to be updated accordingly, since with a larger page
shift the stack's vm_struct will hold fewer, larger pages.

>       array_size = (nr_pages * sizeof(struct page *));
>  
>       /* Please note that the recursion is strictly bounded. */
>       if (array_size > PAGE_SIZE) {
>               pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
> -                             PAGE_KERNEL, node, area->caller);
> +                             PAGE_KERNEL, 0, node, area->caller);
>       } else {
>               pages = kmalloc_node(array_size, nested_gfp, node);
>       }

Reply via email to