On Mon 08-03-21 18:28:03, Muchun Song wrote:
> Because we reuse the first tail vmemmap page frame and remap it
> read-only, we cannot set PageHWPoison on some tail pages. So we use
> head[4].private (there are at least 128 struct page structures
> associated with the optimized HugeTLB page, so using head[4].private
> is safe) to record the real error page index and set PageHWPoison on
> the raw error page later.

Can there be more than one poisoned tail page? Also, who consumes that
index and sets HWPoison on the proper tail page?
 
> Signed-off-by: Muchun Song <[email protected]>
> Reviewed-by: Oscar Salvador <[email protected]>
> Acked-by: David Rientjes <[email protected]>
> Tested-by: Chen Huang <[email protected]>
> Tested-by: Bodeddula Balasubramaniam <[email protected]>
> ---
>  mm/hugetlb.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
>  1 file changed, 72 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 377e0c1b283f..c0c1b7635ca9 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1304,6 +1304,74 @@ static inline void destroy_compound_gigantic_page(struct page *page,
>                                               unsigned int order) { }
>  #endif
>  
> +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
> +static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
> +{
> +     struct page *page;
> +
> +     if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
> +             return;
> +
> +     page = head + page_private(head + 4);
> +
> +     /*
> +      * Move PageHWPoison flag from head page to the raw error page,
> +      * which makes any subpages rather than the error page reusable.
> +      */
> +     if (page != head) {
> +             SetPageHWPoison(page);
> +             ClearPageHWPoison(head);
> +     }
> +}
> +
> +static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
> +                                     struct page *page)
> +{
> +     if (!PageHWPoison(head))
> +             return;
> +
> +     if (free_vmemmap_pages_per_hpage(h)) {
> +             set_page_private(head + 4, page - head);
> +     } else if (page != head) {
> +             /*
> +              * Move PageHWPoison flag from head page to the raw error page,
> +              * which makes any subpages rather than the error page reusable.
> +              */
> +             SetPageHWPoison(page);
> +             ClearPageHWPoison(head);
> +     }
> +}
> +
> +static inline void hwpoison_subpage_clear(struct hstate *h, struct page *head)
> +{
> +     if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
> +             return;
> +
> +     set_page_private(head + 4, 0);
> +}
> +#else
> +static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
> +{
> +}
> +
> +static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
> +                                     struct page *page)
> +{
> +     if (PageHWPoison(head) && page != head) {
> +             /*
> +              * Move PageHWPoison flag from head page to the raw error page,
> +              * which makes any subpages rather than the error page reusable.
> +              */
> +             SetPageHWPoison(page);
> +             ClearPageHWPoison(head);
> +     }
> +}
> +
> +static inline void hwpoison_subpage_clear(struct hstate *h, struct page *head)
> +{
> +}
> +#endif
> +
>  static int update_and_free_page(struct hstate *h, struct page *page)
>       __releases(&hugetlb_lock) __acquires(&hugetlb_lock)
>  {
> @@ -1357,6 +1425,8 @@ static int update_and_free_page(struct hstate *h, struct page *page)
>               return -ENOMEM;
>       }
>  
> +     hwpoison_subpage_deliver(h, page);
> +
>       for (i = 0; i < pages_per_huge_page(h);
>            i++, subpage = mem_map_next(subpage, page, i)) {
>               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
> @@ -1801,14 +1871,7 @@ int dissolve_free_huge_page(struct page *page)
>                       goto retry;
>               }
>  
> -             /*
> -              * Move PageHWPoison flag from head page to the raw error page,
> -              * which makes any subpages rather than the error page reusable.
> -              */
> -             if (PageHWPoison(head) && page != head) {
> -                     SetPageHWPoison(page);
> -                     ClearPageHWPoison(head);
> -             }
> +             hwpoison_subpage_set(h, head, page);
>               list_del(&head->lru);
>               h->free_huge_pages--;
>               h->free_huge_pages_node[nid]--;
> @@ -1818,6 +1881,7 @@ int dissolve_free_huge_page(struct page *page)
>                       h->surplus_huge_pages--;
>                       h->surplus_huge_pages_node[nid]--;
>                       h->max_huge_pages++;
> +                     hwpoison_subpage_clear(h, head);
>               }
>       }
>  out:
> -- 
> 2.11.0
> 

-- 
Michal Hocko
SUSE Labs
