On 08/07/20 at 05:12pm, Wei Yang wrote:
> Function dequeue_huge_page_node_exact() iterates the free list and
> returns the first non-isolated one.
> 
> Instead of breaking out of the loop and checking the loop variable
> afterwards, we could return from within the loop directly. This
> removes a redundant check.
> 
> Signed-off-by: Wei Yang <richard.weiy...@linux.alibaba.com>
> ---
>  mm/hugetlb.c | 26 ++++++++++++--------------
>  1 file changed, 12 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index b8e844911b5a..9473eb6800e9 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1035,20 +1035,18 @@ static struct page 
> *dequeue_huge_page_node_exact(struct hstate *h, int nid)
>  {
>       struct page *page;
>  
> -     list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
> -             if (!PageHWPoison(page))
> -                     break;

I don't see how it reduces any redundant checks; these are just two
different styles.

> -     /*
> -      * if 'non-isolated free hugepage' not found on the list,
> -      * the allocation fails.

But the above code comment seems stale: the code checks for a HWPoison
page directly, not the old isolated-page check the comment describes.

> -      */
> -     if (&h->hugepage_freelists[nid] == &page->lru)
> -             return NULL;
> -     list_move(&page->lru, &h->hugepage_activelist);
> -     set_page_refcounted(page);
> -     h->free_huge_pages--;
> -     h->free_huge_pages_node[nid]--;
> -     return page;
> +     list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
> +             if (PageHWPoison(page))
> +                     continue;
> +
> +             list_move(&page->lru, &h->hugepage_activelist);
> +             set_page_refcounted(page);
> +             h->free_huge_pages--;
> +             h->free_huge_pages_node[nid]--;
> +             return page;
> +     }
> +
> +     return NULL;
>  }
>  
>  static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t 
> gfp_mask, int nid,
> -- 
> 2.20.1 (Apple Git-117)
> 
> 

Reply via email to