On Sat 31-08-24 08:28:23, Barry Song wrote:
> From: Barry Song <v-songbao...@oppo.com>
> 
> Three points for this change:
> 
> 1. We should consolidate all warnings in one place. Currently, the
>    order > 1 warning is in the hotpath, while others are in less
>    likely scenarios. Moving all warnings to the slowpath will reduce
>    the overhead for order > 1 and increase the visibility of other
>    warnings.
> 
> 2. We currently have two warnings for order: one for order > 1 in
>    the hotpath and another for order > PAGE_ALLOC_COSTLY_ORDER in the
>    laziest path. I suggest standardizing on order > 1 since it's been
>    in use for a long time.
> 
> 3. We don't need to check for __GFP_NOWARN in this case. __GFP_NOWARN
>    is meant to suppress allocation failure reports, but here we're
>    dealing with bug detection, not allocation failures. So replace
>    WARN_ON_ONCE_GFP by WARN_ON_ONCE.
> 
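(A quick note on point 3, since the distinction matters: WARN_ON_ONCE_GFP()
in mm/internal.h stays quiet when the caller passed __GFP_NOWARN, while a
plain WARN_ON_ONCE() fires regardless of the gfp mask. A behavioural sketch
only, not the real macro definitions:

	WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask);  /* suppressed by __GFP_NOWARN */
	WARN_ON_ONCE(!can_direct_reclaim);                /* warns once, gfp mask ignored */

For bug detection the unconditional variant is the right tool, as the patch
argues.)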
> Suggested-by: Vlastimil Babka <vba...@suse.cz>
> Signed-off-by: Barry Song <v-songbao...@oppo.com>

Acked-by: Michal Hocko <mho...@suse.com>

Updating the documentation about the order > 1 restriction sounds like it
would still fall into the scope of this patch. I do not think we absolutely
have to document every unsupported gfp flag combination for __GFP_NOFAIL,
but the order limit is a good addition, with a note that kvmalloc should be
used instead in such a case.
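
Something along these lines is what the documentation could point callers
to. A minimal sketch with made-up names (big_buf_alloc, size), not code from
this patch; it assumes the buffer only needs to be virtually contiguous,
which is what kvmalloc()'s vmalloc fallback provides:

	/* hypothetical caller that used to do a high-order __GFP_NOFAIL allocation */
	static void *big_buf_alloc(size_t size)
	{
		/*
		 * Rather than e.g.
		 *	alloc_pages(GFP_KERNEL | __GFP_NOFAIL, get_order(size))
		 * with order > 1, let kvmalloc() fall back to vmalloc() when
		 * physical contiguity is not actually required.
		 */
		return kvmalloc(size, GFP_KERNEL | __GFP_NOFAIL);
	}

The result is then freed with kvfree() rather than free_pages().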

> ---
>  mm/page_alloc.c | 50 ++++++++++++++++++++++++-------------------------
>  1 file changed, 25 insertions(+), 25 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index c81ee5662cc7..e790b4227322 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3033,12 +3033,6 @@ struct page *rmqueue(struct zone *preferred_zone,
>  {
>       struct page *page;
>  
> -     /*
> -      * We most definitely don't want callers attempting to
> -      * allocate greater than order-1 page units with __GFP_NOFAIL.
> -      */
> -     WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
> -
>       if (likely(pcp_allowed_order(order))) {
>               page = rmqueue_pcplist(preferred_zone, zone, order,
>                                      migratetype, alloc_flags);
> @@ -4175,6 +4169,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>  {
>       bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
>       bool can_compact = gfp_compaction_allowed(gfp_mask);
> +     bool nofail = gfp_mask & __GFP_NOFAIL;
>       const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
>       struct page *page = NULL;
>       unsigned int alloc_flags;
> @@ -4187,6 +4182,25 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>       unsigned int zonelist_iter_cookie;
>       int reserve_flags;
>  
> +     if (unlikely(nofail)) {
> +             /*
> +              * We most definitely don't want callers attempting to
> +              * allocate greater than order-1 page units with __GFP_NOFAIL.
> +              */
> +             WARN_ON_ONCE(order > 1);
> +             /*
> +              * Also we don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM,
> +              * otherwise we may end up in a lockup.
> +              */
> +             WARN_ON_ONCE(!can_direct_reclaim);
> +             /*
> +              * PF_MEMALLOC request from this context is rather bizarre
> +              * because we cannot reclaim anything and can only loop waiting
> +              * for somebody to do the work for us.
> +              */
> +             WARN_ON_ONCE(current->flags & PF_MEMALLOC);
> +     }
> +
>  restart:
>       compaction_retries = 0;
>       no_progress_loops = 0;
> @@ -4404,29 +4418,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>        * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
>        * we always retry
>        */
> -     if (gfp_mask & __GFP_NOFAIL) {
> +     if (unlikely(nofail)) {
>               /*
> -              * All existing users of the __GFP_NOFAIL are blockable, so warn
> -              * of any new users that actually require GFP_NOWAIT
> +              * Lacking direct_reclaim we can't do anything to reclaim memory,
> +              * so we disregard these unreasonable nofail requests and still
> +              * return NULL
>                */
> -             if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
> +             if (!can_direct_reclaim)
>                       goto fail;
>  
> -             /*
> -              * PF_MEMALLOC request from this context is rather bizarre
> -              * because we cannot reclaim anything and only can loop waiting
> -              * for somebody to do a work for us
> -              */
> -             WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
> -
> -             /*
> -              * non failing costly orders are a hard requirement which we
> -              * are not prepared for much so let's warn about these users
> -              * so that we can identify them and convert them to something
> -              * else.
> -              */
> -             WARN_ON_ONCE_GFP(costly_order, gfp_mask);
> -
>               /*
>                * Help non-failing allocations by giving some access to memory
>                * reserves normally used for high priority non-blocking
> -- 
> 2.34.1

-- 
Michal Hocko
SUSE Labs
