From: Hongzhen Luo <[email protected]>

Add page cache sharing support for compressed inodes.

For an inode that shares its page cache, z_erofs_read_folio() and
z_erofs_readahead() now look up and pin the real (backing) inode with
erofs_ishare_iget(), run the decompression frontend against it, and
drop the reference with erofs_ishare_iput() once the request has been
submitted.

Signed-off-by: Hongzhen Luo <[email protected]>
Signed-off-by: Hongbo Li <[email protected]>
---
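Note for context: the read paths below wrap the decompression frontend in a
get/use/put sequence around the ishare helpers (erofs_is_ishare_inode(),
erofs_ishare_iget(), erofs_ishare_iput()) introduced elsewhere in this
series. The userspace sketch below is only a rough model of that sequence
under assumed helper semantics (look up and pin the shared backing inode,
release it once the I/O has been submitted); the struct and helpers in it
are illustrative stand-ins, not the kernel implementation.

/*
 * Userspace sketch only (not kernel code): models the get/use/put pattern
 * the read paths follow. Helper names mirror the diff below; their
 * semantics here are assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct inode {
	bool is_ishare;		/* does this inode share page cache? */
	struct inode *backing;	/* shared (real) inode, if any */
	int refcount;		/* pin count on the backing inode */
};

static bool erofs_is_ishare_inode(const struct inode *inode)
{
	return inode->is_ishare;
}

/* Pin and return the real inode backing a page-cache-sharing inode. */
static struct inode *erofs_ishare_iget(struct inode *inode)
{
	if (!inode->backing)
		return NULL;	/* lookup may fail; callers must check */
	inode->backing->refcount++;
	return inode->backing;
}

static void erofs_ishare_iput(struct inode *realinode)
{
	realinode->refcount--;
}

/* Mirrors the shape of z_erofs_read_folio() after this patch. */
static int read_one(struct inode *inode)
{
	struct inode *realinode;

	if (erofs_is_ishare_inode(inode))
		realinode = erofs_ishare_iget(inode);
	else
		realinode = inode;

	if (!realinode)
		return -5;	/* -EIO in the kernel */

	printf("reading via inode %p (refcount %d)\n",
	       (void *)realinode, realinode->refcount);

	if (erofs_is_ishare_inode(inode))
		erofs_ishare_iput(realinode);
	return 0;
}

int main(void)
{
	struct inode shared = { .refcount = 1 };
	struct inode alias = { .is_ishare = true, .backing = &shared };

	return read_one(&alias) || read_one(&shared);
}

Making the frontend's inode pointer non-const (first hunk) is what allows
f.inode to be assigned after Z_EROFS_DEFINE_FRONTEND() once the real inode
is known.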
 fs/erofs/zdata.c | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 65da21504632..465918093984 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -493,7 +493,7 @@ enum z_erofs_pclustermode {
 };
 
 struct z_erofs_frontend {
-	struct inode *const inode;
+	struct inode *inode;
 	struct erofs_map_blocks map;
 	struct z_erofs_bvec_iter biter;
 
@@ -1883,10 +1883,18 @@ static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
 
 static int z_erofs_read_folio(struct file *file, struct folio *folio)
 {
-	struct inode *const inode = folio->mapping->host;
-	Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
+	struct inode *const inode = folio->mapping->host, *realinode;
+	Z_EROFS_DEFINE_FRONTEND(f, NULL, folio_pos(folio));
 	int err;
 
+	if (erofs_is_ishare_inode(inode))
+		realinode = erofs_ishare_iget(inode);
+	else
+		realinode = inode;
+
+	if (!realinode)
+		return -EIO;
+	f.inode = realinode;
 	trace_erofs_read_folio(folio, false);
 	z_erofs_pcluster_readmore(&f, NULL, true);
 	err = z_erofs_scan_folio(&f, folio, false);
@@ -1896,23 +1904,34 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	/* if some pclusters are ready, need submit them anyway */
 	err = z_erofs_runqueue(&f, 0) ?: err;
 	if (err && err != -EINTR)
-		erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
-			  err, folio->index, EROFS_I(inode)->nid);
+		erofs_err(realinode->i_sb, "read error %d @ %lu of nid %llu",
+			  err, folio->index, EROFS_I(realinode)->nid);
 
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&f.pagepool);
+
+	if (erofs_is_ishare_inode(inode))
+		erofs_ishare_iput(realinode);
 	return err;
 }
 
 static void z_erofs_readahead(struct readahead_control *rac)
 {
-	struct inode *const inode = rac->mapping->host;
-	Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
+	struct inode *const inode = rac->mapping->host, *realinode;
+	Z_EROFS_DEFINE_FRONTEND(f, NULL, readahead_pos(rac));
 	unsigned int nrpages = readahead_count(rac);
 	struct folio *head = NULL, *folio;
 	int err;
 
-	trace_erofs_readahead(inode, readahead_index(rac), nrpages, false);
+	if (erofs_is_ishare_inode(inode))
+		realinode = erofs_ishare_iget(inode);
+	else
+		realinode = inode;
+
+	if (!realinode)
+		return;
+	f.inode = realinode;
+	trace_erofs_readahead(realinode, readahead_index(rac), nrpages, false);
 	z_erofs_pcluster_readmore(&f, rac, true);
 	while ((folio = readahead_folio(rac))) {
 		folio->private = head;
@@ -1926,8 +1945,8 @@ static void z_erofs_readahead(struct readahead_control *rac)
 
 		err = z_erofs_scan_folio(&f, folio, true);
 		if (err && err != -EINTR)
-			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
-				  folio->index, EROFS_I(inode)->nid);
+			erofs_err(realinode->i_sb, "readahead error at folio %lu @ nid %llu",
+				  folio->index, EROFS_I(realinode)->nid);
 	}
 	z_erofs_pcluster_readmore(&f, rac, false);
 	z_erofs_pcluster_end(&f);
@@ -1935,6 +1954,9 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_runqueue(&f, nrpages);
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&f.pagepool);
+
+	if (erofs_is_ishare_inode(inode))
+		erofs_ishare_iput(realinode);
 }
 
 const struct address_space_operations z_erofs_aops = {
-- 
2.22.0
