In a couple of places it is necessary to calculate the number of pages
covered by a start and an end offset. Currently this is open-coded;
unify the code base by replacing all such sites with the DIV_ROUND_UP
macro. Note that the existing open-coded sites add 'PAGE_SIZE' rather
than 'PAGE_SIZE - 1' because the end offsets are inclusive (one less
than the total length), so 'end - start + PAGE_SIZE' already equals
'length + PAGE_SIZE - 1'; the conversion must therefore use
'end + 1 - start' as the DIV_ROUND_UP numerator to preserve behavior
for lengths that are one byte past a page boundary.

Signed-off-by: Nikolay Borisov <[email protected]>
---
 fs/btrfs/extent_io.c | 7 +++----
 fs/btrfs/inode.c     | 8 +++-----
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b9b30e618378..0cd41ec8b698 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3189,8 +3189,8 @@ static noinline_for_stack int writepage_delalloc(struct 
inode *inode,
                 * delalloc_end is already one less than the total length, so
                 * we don't subtract one from PAGE_SIZE
                 */
-               delalloc_to_write += (delalloc_end - delalloc_start +
-                                     PAGE_SIZE) >> PAGE_SHIFT;
+               delalloc_to_write += DIV_ROUND_UP(delalloc_end - delalloc_start,
+                                                 PAGE_SIZE);
                delalloc_start = delalloc_end + 1;
        }
        if (wbc->nr_to_write < delalloc_to_write) {
@@ -4001,8 +4001,7 @@ int extent_write_locked_range(struct inode *inode, u64 
start, u64 end,
        struct address_space *mapping = inode->i_mapping;
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        struct page *page;
-       unsigned long nr_pages = (end - start + PAGE_SIZE) >>
-               PAGE_SHIFT;
+       unsigned long nr_pages = DIV_ROUND_UP(end - start, PAGE_SIZE);
 
        struct extent_page_data epd = {
                .bio = NULL,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41ad0d06b3d4..32977174826b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -469,7 +469,7 @@ static noinline void compress_file_range(struct inode 
*inode,
        actual_end = min_t(u64, i_size_read(inode), end + 1);
 again:
        will_compress = 0;
-       nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+       nr_pages = DIV_ROUND_UP(end - start, PAGE_SIZE);
        BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
        nr_pages = min_t(unsigned long, nr_pages,
                        BTRFS_MAX_COMPRESSED / PAGE_SIZE);
@@ -1157,8 +1157,7 @@ static noinline void async_cow_submit(struct btrfs_work 
*work)
        async_cow = container_of(work, struct async_cow, work);
 
        fs_info = async_cow->fs_info;
-       nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
-               PAGE_SHIFT;
+       nr_pages = DIV_ROUND_UP(async_cow->end - async_cow->start, PAGE_SIZE);
 
        /* atomic_sub_return implies a barrier */
        if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
@@ -1221,8 +1220,7 @@ static int cow_file_range_async(struct inode *inode, 
struct page *locked_page,
                                async_cow_start, async_cow_submit,
                                async_cow_free);
 
-               nr_pages = (cur_end - start + PAGE_SIZE) >>
-                       PAGE_SHIFT;
+               nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
                atomic_add(nr_pages, &fs_info->async_delalloc_pages);
 
                btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
-- 
2.17.1

Reply via email to