Use a more accurate compressed page count to size bios instead of passing BIO_MAX_PAGES unconditionally, so that bio_alloc() doesn't over-allocate bio_vecs for reads far smaller than BIO_MAX_PAGES.
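In short, z_erofs_do_read_page() now accumulates the number of compressed
blocks each pcluster will read into a new compressedblock_total field, and
z_erofs_submit_queue() clamps every bio allocation to the count still
outstanding. A minimal sketch of the accounting (simplified restatement of
the changes below, not additional code):

	/* while walking pclusters: count compressed blocks to be read */
	fe->compressedblock_total += BIT(clt->pcl->clusterbits);

	/* at submission: never request more bvecs than blocks remaining */
	bio = bio_alloc(GFP_NOIO, min(cblks, BIO_MAX_PAGES));
	...
	--cblks;	/* one compressed page attached to a bio */

This keeps each allocation bounded by BIO_MAX_PAGES while avoiding the
worst-case bvec allocation for chains shorter than that.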
Signed-off-by: Gao Xiang <hsiang...@redhat.com>
---
 fs/erofs/zdata.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index d483e9fee41c..bb20f73f10e0 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -135,6 +135,8 @@ struct z_erofs_decompress_frontend {
 	struct z_erofs_collector clt;
 	struct erofs_map_blocks map;
 
+	unsigned int compressedblock_total;
+
 	/* used for applying cache strategy on the fly */
 	bool backmost;
 	erofs_off_t headoffset;
@@ -622,6 +624,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 
 	preload_compressed_pages(clt, MNGD_MAPPING(sbi),
 				 cache_strategy, pagepool);
+	fe->compressedblock_total += BIT(clt->pcl->clusterbits);
 
 hitted:
 	/*
@@ -1151,7 +1154,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 }
 
 static void z_erofs_submit_queue(struct super_block *sb,
-				 z_erofs_next_pcluster_t owned_head,
+				 struct z_erofs_decompress_frontend *f,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
 				 bool *force_fg)
@@ -1160,10 +1163,12 @@ static void z_erofs_submit_queue(struct super_block *sb,
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
 	void *bi_private;
+	z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t last_index;
 	unsigned int nr_bios = 0;
 	struct bio *bio = NULL;
+	int cblks = f->compressedblock_total;
 
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
@@ -1207,8 +1212,8 @@ static void z_erofs_submit_queue(struct super_block *sb,
 		}
 
 		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
+			bio = bio_alloc(GFP_NOIO,
+					min(cblks, BIO_MAX_PAGES));
 			bio->bi_end_io = z_erofs_decompressqueue_endio;
 			bio_set_dev(bio, sb->s_bdev);
 			bio->bi_iter.bi_sector = (sector_t)cur <<
@@ -1221,6 +1226,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
 		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
 			goto submit_bio_retry;
 
+		--cblks;
 		last_index = cur;
 		bypass = false;
 	} while (++cur < end);
@@ -1234,6 +1240,8 @@ static void z_erofs_submit_queue(struct super_block *sb,
 	if (bio)
 		submit_bio(bio);
 
+	DBG_BUGON(cblks);
+
 	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
@@ -1246,14 +1254,14 @@ static void z_erofs_submit_queue(struct super_block *sb,
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
-			     struct z_erofs_collector *clt,
+			     struct z_erofs_decompress_frontend *f,
 			     struct list_head *pagepool, bool force_fg)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+	if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
+	z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1284,7 +1292,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	(void)z_erofs_collector_end(&f.clt);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1343,7 +1351,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 
 	(void)z_erofs_collector_end(&f.clt);
 
-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);
 
 	if (f.map.mpage)
 		put_page(f.map.mpage);
-- 
2.18.1