On Mon 12-06-17 11:06:56, Michal Hocko wrote:
[...]
> @@ -1723,29 +1729,26 @@ struct page *alloc_huge_page_node(struct hstate *h, 
> int nid)
>       return page;
>  }
>  
> -struct page *alloc_huge_page_nodemask(struct hstate *h, const nodemask_t 
> *nmask)
> +struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
> +             nodemask_t *nmask)
>  {
>       struct page *page = NULL;
> -     int node;
>  
>       spin_lock(&hugetlb_lock);
>       if (h->free_huge_pages - h->resv_huge_pages > 0) {
> -             for_each_node_mask(node, *nmask) {
> -                     page = dequeue_huge_page_node_exact(h, node);
> -                     if (page)
> -                             break;
> -             }
> +             page = dequeue_huge_page_nodemask(h, preferred_nid, nmask);
> +             if (page)
> +                     goto unlock;
>       }
> +unlock:
>       spin_unlock(&hugetlb_lock);
>       if (page)
>               return page;
>  
>       /* No reservations, try to overcommit */
> -     for_each_node_mask(node, *nmask) {
> -             page = __alloc_buddy_huge_page_no_mpol(h, node);
> -             if (page)
> -                     return page;
> -     }
> +     page = __alloc_buddy_huge_page_no_mpol(h, preferred_nid);
> +     if (page)
> +             return page;

I was too quick. The fallback allocation path needs some more work. I am
working on this, but it quickly gets quite hairy, so let's see whether
this can still converge to something reasonable.
-- 
Michal Hocko
SUSE Labs

Reply via email to