All the context this function needs is already held in struct async_chunk. Currently we pass not only the struct but also each of its individual members. This is redundant, so simplify the interface by passing only struct async_chunk and letting compress_file_range extract the values it requires. No functional changes.
Signed-off-by: Nikolay Borisov <nbori...@suse.com>
Reviewed-by: Johannes Thumshirn <jthumsh...@suse.de>
---
 fs/btrfs/inode.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2a9d24bc8b53..df008aa195b4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -449,14 +449,14 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
  * are written in the same order that the flusher thread sent them
  * down.
  */
-static noinline void compress_file_range(struct inode *inode,
-					struct page *locked_page,
-					u64 start, u64 end,
-					struct async_chunk *async_chunk,
-					int *num_added)
+static noinline void compress_file_range(struct async_chunk *async_chunk,
+					 int *num_added)
 {
+	struct inode *inode = async_chunk->inode;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 blocksize = fs_info->sectorsize;
+	u64 start = async_chunk->start;
+	u64 end = async_chunk->end;
 	u64 actual_end;
 	int ret = 0;
 	struct page **pages = NULL;
@@ -675,9 +675,9 @@ static noinline void compress_file_range(struct inode *inode,
 	 * to our extent and set things up for the async work queue to run
 	 * cow_file_range to do the normal delalloc dance.
 	 */
-	if (page_offset(locked_page) >= start &&
-	    page_offset(locked_page) <= end)
-		__set_page_dirty_nobuffers(locked_page);
+	if (page_offset(async_chunk->locked_page) >= start &&
+	    page_offset(async_chunk->locked_page) <= end)
+		__set_page_dirty_nobuffers(async_chunk->locked_page);
 		/* unlocked later on in the async handlers */
 
 	if (redirty)
@@ -1141,9 +1141,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
 	int num_added = 0;
 
 	async_chunk = container_of(work, struct async_chunk, work);
-	compress_file_range(async_chunk->inode, async_chunk->locked_page,
-			    async_chunk->start, async_chunk->end, async_chunk,
-			    &num_added);
+	compress_file_range(async_chunk, &num_added);
 	if (num_added == 0) {
 		btrfs_add_delayed_iput(async_chunk->inode);
 		async_chunk->inode = NULL;
-- 
2.17.1
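
For context, a rough sketch of the struct async_chunk members that compress_file_range() now reads directly. The full definition lives in fs/btrfs/inode.c and is not shown in this diff, so the field list below is an assumption limited to the members referenced in the hunks above:

/*
 * Sketch only: the real struct async_chunk carries additional members
 * (the list of compressed extents, write flags, etc.); shown here are
 * just the fields compress_file_range() extracts after this patch.
 */
struct async_chunk {
	struct inode *inode;		/* inode owning the delalloc range */
	struct page *locked_page;	/* page left locked by the caller */
	u64 start;			/* start offset of the range */
	u64 end;			/* end offset of the range */
	struct btrfs_work work;		/* work item; container_of() in async_cow_start() */
	/* ... */
};

Passing the containing struct rather than its members also keeps the compress_file_range() prototype stable if more per-chunk state is added later.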