On Mon, 26 Feb 2018, Aaron Lu wrote:

> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 3154859cccd6..35576da0a6c9 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1116,13 +1116,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
>       int migratetype = 0;
>       int batch_free = 0;
>       bool isolated_pageblocks;
> +     struct page *page, *tmp;
> +     LIST_HEAD(head);
>  
>       pcp->count -= count;
> -     spin_lock(&zone->lock);
> -     isolated_pageblocks = has_isolate_pageblock(zone);
> -
>       while (count) {
> -             struct page *page;
>               struct list_head *list;
>  
>               /*
> @@ -1144,26 +1142,31 @@ static void free_pcppages_bulk(struct zone *zone, int count,
>                       batch_free = count;
>  
>               do {
> -                     int mt; /* migratetype of the to-be-freed page */
> -
>                       page = list_last_entry(list, struct page, lru);
>                       /* must delete as __free_one_page list manipulates */

Looks good in general, but I'm not sure how to reconcile this comment with 
the new implementation, which later links page->lru again (via the 
list_add_tail() onto the local head list).

>                       list_del(&page->lru);
>  
> -                     mt = get_pcppage_migratetype(page);
> -                     /* MIGRATE_ISOLATE page should not go to pcplists */
> -                     VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
> -                     /* Pageblock could have been isolated meanwhile */
> -                     if (unlikely(isolated_pageblocks))
> -                             mt = get_pageblock_migratetype(page);
> -
>                       if (bulkfree_pcp_prepare(page))
>                               continue;
>  
> -                     __free_one_page(page, page_to_pfn(page), zone, 0, mt);
> -                     trace_mm_page_pcpu_drain(page, 0, mt);
> +                     list_add_tail(&page->lru, &head);
>               } while (--count && --batch_free && !list_empty(list));
>       }
> +
> +     spin_lock(&zone->lock);
> +     isolated_pageblocks = has_isolate_pageblock(zone);
> +
> +     list_for_each_entry_safe(page, tmp, &head, lru) {
> +             int mt = get_pcppage_migratetype(page);
> +             /* MIGRATE_ISOLATE page should not go to pcplists */
> +             VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
> +             /* Pageblock could have been isolated meanwhile */
> +             if (unlikely(isolated_pageblocks))
> +                     mt = get_pageblock_migratetype(page);
> +
> +             __free_one_page(page, page_to_pfn(page), zone, 0, mt);
> +             trace_mm_page_pcpu_drain(page, 0, mt);
> +     }
>       spin_unlock(&zone->lock);
>  }
>  

Reply via email to