On Fri, Apr 15, 2022 at 08:36:13PM +0800, Jeffle Xu wrote:
> Implement fscache-based data readahead. Also register an individual
> bdi for each erofs instance to enable readahead.
> 
> Signed-off-by: Jeffle Xu <jeffl...@linux.alibaba.com>
> ---
>  fs/erofs/fscache.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++
>  fs/erofs/super.c   |  4 +++
>  2 files changed, 90 insertions(+)
> 
> diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
> index 08849c15500f..eaa50692ddba 100644
> --- a/fs/erofs/fscache.c
> +++ b/fs/erofs/fscache.c
> @@ -163,12 +163,98 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
>       return ret;
>  }
>  
> +static void erofs_fscache_unlock_folios(struct readahead_control *rac,
> +                                     size_t len)
> +{
> +     while (len) {
> +             struct folio *folio = readahead_folio(rac);
> +
> +             len -= folio_size(folio);
> +             folio_mark_uptodate(folio);
> +             folio_unlock(folio);
> +     }
> +}
> +
> +static void erofs_fscache_readahead(struct readahead_control *rac)
> +{
> +     struct inode *inode = rac->mapping->host;
> +     struct super_block *sb = inode->i_sb;
> +     size_t len, count, done = 0;
> +     erofs_off_t pos;
> +     loff_t start, offset;
> +     int ret;
> +
> +     if (!readahead_count(rac))
> +             return;
> +
> +     start = readahead_pos(rac);
> +     len = readahead_length(rac);
> +
> +     do {
> +             struct erofs_map_blocks map;
> +             struct erofs_map_dev mdev;
> +
> +             pos = start + done;
> +             map.m_la = pos;
> +
> +             ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
> +             if (ret)
> +                     return;
> +
> +             offset = start + done;
> +             count = min_t(size_t, map.m_llen - (pos - map.m_la),
> +                           len - done);
> +
> +             if (!(map.m_flags & EROFS_MAP_MAPPED)) {
> +                     struct iov_iter iter;
> +
> +                     iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
> +                                     offset, count);
> +                     iov_iter_zero(count, &iter);
> +
> +                     erofs_fscache_unlock_folios(rac, count);
> +                     ret = count;
> +                     continue;
> +             }
> +
> +             if (map.m_flags & EROFS_MAP_META) {
> +                     struct folio *folio = readahead_folio(rac);
> +
> +                     ret = erofs_fscache_readpage_inline(folio, &map);
> +                     if (!ret) {
> +                             folio_mark_uptodate(folio);
> +                             ret = folio_size(folio);
> +                     }
> +
> +                     folio_unlock(folio);
> +                     continue;
> +             }
> +
> +             mdev = (struct erofs_map_dev) {
> +                     .m_deviceid = map.m_deviceid,
> +                     .m_pa = map.m_pa,
> +             };
> +             ret = erofs_map_dev(sb, &mdev);
> +             if (ret)
> +                     return;
> +
> +             ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
> +                             rac->mapping, offset, count,
> +                             mdev.m_pa + (pos - map.m_la));
> +             if (!ret) {
> +                     erofs_fscache_unlock_folios(rac, count);
> +                     ret = count;
> +             }

I think this really needs a comment explaining why we don't need to
unlock the remaining folios in the error cases.
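
If I'm reading mm/readahead.c correctly, read_pages() unlocks and puts
every folio that ->readahead() didn't consume via readahead_folio(), so
returning early should be safe here, but it's subtle enough to be worth
spelling out. A rough sketch of the kind of comment I mean (untested,
exact wording up to you):

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	/*
	 * No unlocking is needed on failure: any folio that was not
	 * consumed via readahead_folio() is unlocked and released by
	 * read_pages() once ->readahead() returns.
	 */
	if (ret)
		return;

The same reasoning applies to the erofs_map_dev() and
erofs_fscache_read_folios() failure paths below.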

Thanks,
Gao Xiang

> +     } while (ret > 0 && ((done += ret) < len));
> +}
> +
