Let's add the REQ_RAHEAD flag so that readahead I/O requests are
easier to identify in blktrace.
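
For reference, blktrace reflects this flag as an 'A' in the RWBS
field, so readahead reads show up as "RA" rather than plain "R" in
blkparse output. Below is a simplified sketch of that decoding (cf.
blk_fill_rwbs() in kernel/trace/blktrace.c; the real function covers
more ops and flags, this is just to illustrate the effect):

  /*
   * Given opf = bio->bi_opf, decode it into the RWBS string that
   * blkparse prints (simplified from blk_fill_rwbs()).
   */
  char rwbs[8];
  int i = 0;

  if ((opf & REQ_OP_MASK) == REQ_OP_READ)
          rwbs[i++] = 'R';
  if (opf & REQ_RAHEAD)
          rwbs[i++] = 'A';        /* readahead bios now show as "RA" */
  rwbs[i] = '\0';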

Signed-off-by: Gao Xiang <hsiang...@redhat.com>
---
 fs/erofs/data.c  | 2 +-
 fs/erofs/zdata.c | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 459ecb42cbd3..347be146884c 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -224,7 +224,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
                bio_set_dev(bio, sb->s_bdev);
                bio->bi_iter.bi_sector = (sector_t)blknr <<
                        LOG_SECTORS_PER_BLOCK;
-               bio->bi_opf = REQ_OP_READ;
+               bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
        }
 
        err = bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index bb20f73f10e0..23940edf16ce 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -136,6 +136,7 @@ struct z_erofs_decompress_frontend {
        struct erofs_map_blocks map;
 
        unsigned int compressedblock_total;
+       bool readahead;
 
        /* used for applying cache strategy on the fly */
        bool backmost;
@@ -1220,6 +1221,8 @@ static void z_erofs_submit_queue(struct super_block *sb,
                                        LOG_SECTORS_PER_BLOCK;
                                bio->bi_private = bi_private;
                                bio->bi_opf = REQ_OP_READ;
+                               if (f->readahead)
+                                       bio->bi_opf |= REQ_RAHEAD;
                                ++nr_bios;
                        }
 
@@ -1318,6 +1321,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 
        trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
 
+       f.readahead = true;
        f.headoffset = readahead_pos(rac);
 
        while ((page = readahead_page(rac))) {
-- 
2.18.1
