On Mon, Feb 17, 2020 at 10:46:00AM -0800, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <wi...@infradead.org>
> 
> Use the new readahead operation in erofs.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>

It looks good to me. Although some further optimization opportunities exist,
we can make a straightforward transformation first. I haven't tested the
whole series yet; I will test it later.

Acked-by: Gao Xiang <gaoxian...@huawei.com>

Thanks,
Gao Xiang

> ---
>  fs/erofs/zdata.c | 29 +++++++++--------------------
>  1 file changed, 9 insertions(+), 20 deletions(-)
> 
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 17f45fcb8c5c..7c02015d501d 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -1303,28 +1303,23 @@ static bool should_decompress_synchronously(struct 
> erofs_sb_info *sbi,
>       return nr <= sbi->max_sync_decompress_pages;
>  }
>  
> -static int z_erofs_readpages(struct file *filp, struct address_space 
> *mapping,
> -                          struct list_head *pages, unsigned int nr_pages)
> +static void z_erofs_readahead(struct readahead_control *rac)
>  {
> -     struct inode *const inode = mapping->host;
> +     struct inode *const inode = rac->mapping->host;
>       struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
>  
> -     bool sync = should_decompress_synchronously(sbi, nr_pages);
> +     bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
>       struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
> -     gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
> -     struct page *head = NULL;
> +     struct page *page, *head = NULL;
>       LIST_HEAD(pagepool);
>  
> -     trace_erofs_readpages(mapping->host, lru_to_page(pages)->index,
> -                           nr_pages, false);
> +     trace_erofs_readpages(inode, readahead_index(rac),
> +                     readahead_count(rac), false);
>  
> -     f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
> -
> -     for (; nr_pages; --nr_pages) {
> -             struct page *page = lru_to_page(pages);
> +     f.headoffset = readahead_offset(rac);
>  
> +     readahead_for_each(rac, page) {
>               prefetchw(&page->flags);
> -             list_del(&page->lru);
>  
>               /*
>                * A pure asynchronous readahead is indicated if
> @@ -1333,11 +1328,6 @@ static int z_erofs_readpages(struct file *filp, struct 
> address_space *mapping,
>                */
>               sync &= !(PageReadahead(page) && !head);
>  
> -             if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
> -                     list_add(&page->lru, &pagepool);
> -                     continue;
> -             }
> -
>               set_page_private(page, (unsigned long)head);
>               head = page;
>       }
> @@ -1366,11 +1356,10 @@ static int z_erofs_readpages(struct file *filp, 
> struct address_space *mapping,
>  
>       /* clean up the remaining free pages */
>       put_pages_list(&pagepool);
> -     return 0;
>  }
>  
>  const struct address_space_operations z_erofs_aops = {
>       .readpage = z_erofs_readpage,
> -     .readpages = z_erofs_readpages,
> +     .readahead = z_erofs_readahead,
>  };
>  
> -- 
> 2.25.0
> 
> 

Reply via email to