On Fri 19-03-21 15:42:05, Mike Kravetz wrote:
> With the introduction of remove_hugetlb_page(), there is no need for
> update_and_free_page() to hold the hugetlb_lock.  Change all callers to
> drop the lock before calling.
> 
> With additional code modifications, this will allow loops which decrease
> the huge page pool to drop the hugetlb_lock for each page, reducing
> long hold times.
> 
> The ugly unlock/lock cycle in free_pool_huge_page() will be removed by
> a subsequent patch which restructures the function.
> 
> Signed-off-by: Mike Kravetz <mike.krav...@oracle.com>

Looks good to me. I will not ack it right now, though. I am still
crawling through the series and want to get a full picture. So far it
looks promising ;).
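
For anybody else following along, the pattern this patch establishes is,
as I read it (my own condensed sketch, not code copied from the patch):

	spin_lock(&hugetlb_lock);
	/* detach the page from hugetlb lists and counters */
	remove_hugetlb_page(h, page, false);
	spin_unlock(&hugetlb_lock);
	/* safe to run unlocked: may block, e.g. in free_gigantic_page() */
	update_and_free_page(h, page);

Once remove_hugetlb_page() has run under the lock, the page is no longer
visible to hugetlb, so nothing can race with the unlocked free.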

> ---
>  mm/hugetlb.c | 21 +++++++++++++--------
>  1 file changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ae185d3315e0..3028cf10d504 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1362,14 +1362,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
>                               1 << PG_writeback);
>       }
>       if (hstate_is_gigantic(h)) {
> -             /*
> -              * Temporarily drop the hugetlb_lock, because
> -              * we might block in free_gigantic_page().
> -              */
> -             spin_unlock(&hugetlb_lock);
>               destroy_compound_gigantic_page(page, huge_page_order(h));
>               free_gigantic_page(page, huge_page_order(h));
> -             spin_lock(&hugetlb_lock);
>       } else {
>               __free_pages(page, huge_page_order(h));
>       }
> @@ -1435,16 +1429,18 @@ static void __free_huge_page(struct page *page)
>  
>       if (HPageTemporary(page)) {
>               remove_hugetlb_page(h, page, false);
> +             spin_unlock(&hugetlb_lock);
>               update_and_free_page(h, page);
>       } else if (h->surplus_huge_pages_node[nid]) {
>               /* remove the page from active list */
>               remove_hugetlb_page(h, page, true);
> +             spin_unlock(&hugetlb_lock);
>               update_and_free_page(h, page);
>       } else {
>               arch_clear_hugepage_flags(page);
>               enqueue_huge_page(h, page);
> +             spin_unlock(&hugetlb_lock);
>       }
> -     spin_unlock(&hugetlb_lock);
>  }
>  
>  /*
> @@ -1725,7 +1721,13 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
>                               list_entry(h->hugepage_freelists[node].next,
>                                         struct page, lru);
>                       remove_hugetlb_page(h, page, acct_surplus);
> +                     /*
> +                      * unlock/lock around update_and_free_page is temporary
> +                      * and will be removed with subsequent patch.
> +                      */
> +                     spin_unlock(&hugetlb_lock);
>                       update_and_free_page(h, page);
> +                     spin_lock(&hugetlb_lock);
>                       ret = 1;
>                       break;
>               }
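
The temporary unlock/lock cycle here is ugly but fine as a transitional
step. To spell out where I assume the restructuring is heading (a
hypothetical sketch with an invented name, not the actual follow-up
patch): isolate the page under the lock and let the caller free it once
the lock is dropped, e.g.:

	/*
	 * Hypothetical helper (name invented for illustration): take one
	 * free page off the pool under hugetlb_lock and hand it back; the
	 * caller drops the lock and calls update_and_free_page() on it.
	 */
	static struct page *remove_pool_huge_page(struct hstate *h,
						  nodemask_t *nodes_allowed,
						  bool acct_surplus)
	{
		int nr_nodes, node;
		struct page *page = NULL;

		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
			    !list_empty(&h->hugepage_freelists[node])) {
				page = list_entry(h->hugepage_freelists[node].next,
						  struct page, lru);
				remove_hugetlb_page(h, page, acct_surplus);
				break;
			}
		}
		return page;
	}

That would let the caller drop the lock once and free the page outside
it, uniform with the other call sites.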
> @@ -1794,8 +1796,9 @@ int dissolve_free_huge_page(struct page *page)
>               }
>               remove_hugetlb_page(h, page, false);
>               h->max_huge_pages--;
> +             spin_unlock(&hugetlb_lock);
>               update_and_free_page(h, head);
> -             rc = 0;
> +             return 0;
>       }
>  out:
>       spin_unlock(&hugetlb_lock);
> @@ -2572,7 +2575,9 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
>                       remove_hugetlb_page(h, page, false);
>                       h->free_huge_pages--;
>                       h->free_huge_pages_node[page_to_nid(page)]--;
> +                     spin_unlock(&hugetlb_lock);
>                       update_and_free_page(h, page);
> +                     spin_lock(&hugetlb_lock);
>  
>                       /*
>                        * update_and_free_page could have dropped lock so
> -- 
> 2.30.2
> 

-- 
Michal Hocko
SUSE Labs
