When only a few pages are read ahead, nr_pages may be smaller than
nr_cpages. Since nr_pages caps nr_vecs when the read bio is allocated,
the cluster's compressed pages are then split across multiple bios and
only merged again at the block layer. In this case, nr_cpages should be
used to pre-allocate the bvecs so the whole cluster fits in one bio.
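
For illustration, a minimal sketch of the sizing rule applied at the
f2fs_grab_read_bio() call site in the diff below. The nr_vecs local is
only for readability here, and the clamp to BIO_MAX_VECS inside the bio
allocation path is assumed rather than shown:

	/*
	 * Illustration only (not part of the diff below):
	 *
	 * Suppose a cluster is stored in 5 compressed blocks
	 * (cc->nr_cpages == 5) but readahead only asked for 2 pages
	 * (nr_pages == 2).  Sizing the bio by nr_pages leaves room for
	 * just 2 bvecs, so the remaining compressed blocks spill into
	 * extra bios that the block layer has to merge afterwards.
	 * Sizing by the larger of the two keeps the whole cluster in a
	 * single bio.
	 */
	unsigned int nr_vecs = max(nr_pages, cc->nr_cpages) - i;

	bio = f2fs_grab_read_bio(inode, blkaddr, nr_vecs,
				 f2fs_ra_op_flags(rac),
				 folio->index, for_write);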

Signed-off-by: Jianan Huang <huangjia...@xiaomi.com>
Signed-off-by: Sheng Yong <shengyo...@xiaomi.com>
---
 fs/f2fs/data.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 31e892842625..c7773b09d83f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2303,7 +2303,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
                }
 
                if (!bio) {
-                       bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+                       bio = f2fs_grab_read_bio(inode, blkaddr,
+                                       max(nr_pages, cc->nr_cpages) - i,
                                        f2fs_ra_op_flags(rac),
                                        folio->index, for_write);
                        if (IS_ERR(bio)) {
@@ -2373,7 +2374,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
        pgoff_t index;
 #endif
        unsigned nr_pages = rac ? readahead_count(rac) : 1;
-       unsigned max_nr_pages = nr_pages;
        int ret = 0;
 
        map.m_pblk = 0;
@@ -2400,7 +2400,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
                /* there are remained compressed pages, submit them */
                if (!f2fs_cluster_can_merge_page(&cc, index)) {
                        ret = f2fs_read_multi_pages(&cc, &bio,
-                                               max_nr_pages,
+                                               nr_pages,
                                                &last_block_in_bio,
                                                rac, false);
                        f2fs_destroy_compress_ctx(&cc, false);
@@ -2432,7 +2432,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 read_single_page:
 #endif
 
-               ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
+               ret = f2fs_read_single_page(inode, folio, nr_pages, &map,
                                        &bio, &last_block_in_bio, rac);
                if (ret) {
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2450,7 +2450,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
                        /* last page */
                        if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
                                ret = f2fs_read_multi_pages(&cc, &bio,
-                                                       max_nr_pages,
+                                                       nr_pages,
                                                        &last_block_in_bio,
                                                        rac, false);
                                f2fs_destroy_compress_ctx(&cc, false);
-- 
2.43.0


