EROFS_MAP_FULL_MAPPED is a more accurate indicator for deciding whether to
cache the last incomplete pcluster for later reads.

Signed-off-by: Gao Xiang <hsiang...@linux.alibaba.com>
---
 fs/erofs/zdata.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 4009283944ca..c28945532a02 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -528,8 +528,6 @@ struct z_erofs_decompress_frontend {
        z_erofs_next_pcluster_t owned_head;
        enum z_erofs_pclustermode mode;
 
-       /* used for applying cache strategy on the fly */
-       bool backmost;
        erofs_off_t headoffset;
 
        /* a pointer used to pick up inplace I/O pages */
@@ -538,7 +536,7 @@ struct z_erofs_decompress_frontend {
 
 #define DECOMPRESS_FRONTEND_INIT(__i) { \
        .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
-       .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
+       .mode = Z_EROFS_PCLUSTER_FOLLOWED }
 
 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
 {
@@ -547,7 +545,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
        if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
                return false;
 
-       if (fe->backmost)
+       if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
                return true;
 
        if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
@@ -939,7 +937,6 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
                erofs_workgroup_put(&pcl->obj);
 
        fe->pcl = NULL;
-       fe->backmost = false;
 }
 
 static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
-- 
2.24.4

Reply via email to