On 8.12.20 at 21:46, Matthew Wilcox (Oracle) wrote:
> Pages being added to the page cache should already be folios, so
> turn add_to_page_cache_lru() into a wrapper.  Saves hundreds of
> bytes of text.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
> ---
>  include/linux/pagemap.h | 13 +++++++--
>  mm/filemap.c            | 62 ++++++++++++++++++++---------------------
>  2 files changed, 41 insertions(+), 34 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 060faeb8d701..3bc56b3aa384 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -778,9 +778,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
>  }
>  
>  int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
> -                             pgoff_t index, gfp_t gfp_mask);
> -int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
> -                             pgoff_t index, gfp_t gfp_mask);
> +                             pgoff_t index, gfp_t gfp);
> +int folio_add_to_page_cache(struct folio *folio, struct address_space *mapping,
> +                             pgoff_t index, gfp_t gfp);
>  extern void delete_from_page_cache(struct page *page);
>  extern void __delete_from_page_cache(struct page *page, void *shadow);
>  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
> @@ -805,6 +805,13 @@ static inline int add_to_page_cache(struct page *page,
>       return error;
>  }
>  
> +static inline int add_to_page_cache_lru(struct page *page,
> +             struct address_space *mapping, pgoff_t index, gfp_t gfp)
> +{
> +     return folio_add_to_page_cache((struct folio *)page, mapping,
> +                     index, gfp);
> +}
> +
>  /**
>   * struct readahead_control - Describes a readahead request.
>   *
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 56ff6aa24265..297144524f58 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -828,25 +828,25 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
>  }
>  EXPORT_SYMBOL_GPL(replace_page_cache_page);
>  
> -static noinline int __add_to_page_cache_locked(struct page *page,
> +static noinline int __add_to_page_cache_locked(struct folio *folio,
>                                       struct address_space *mapping,
> -                                     pgoff_t offset, gfp_t gfp,
> +                                     pgoff_t index, gfp_t gfp,
>                                       void **shadowp)
>  {
> -     XA_STATE(xas, &mapping->i_pages, offset);
> -     int huge = PageHuge(page);
> +     XA_STATE(xas, &mapping->i_pages, index);
> +     int huge = PageHuge(&folio->page);

PageHuge() also does the compound_head() walk. Since you know
&folio->page is either a head page or a base page, you could use
PageHeadHuge(), which simply checks whether it's a head page and then
goes directly to the hugepage check via the dtor.
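
For reference, the two helpers look roughly like this (paraphrased from
mm/hugetlb.c of that era, not the exact code):

	/* Walks back to the head page before checking the hugetlb dtor. */
	int PageHuge(struct page *page)
	{
		if (!PageCompound(page))
			return 0;

		page = compound_head(page);
		return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
	}

	/* Expects to be handed the head (or a base) page directly. */
	int PageHeadHuge(struct page *page_head)
	{
		if (!PageHead(page_head))
			return 0;

		return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
	}

so something like

	int huge = PageHeadHuge(&folio->page);

would skip the compound_head() walk entirely here.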

<snip>
