Convert function to use folios throughout. This is in preparation for
the removal of find_get_pages_range_tag().

Signed-off-by: Vishal Moola (Oracle) <vishal.mo...@gmail.com>
Acked-by: Ryusuke Konishi <konishi.ryus...@gmail.com>
---
 fs/nilfs2/segment.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index b4cebad21b48..2183e1698f8e 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -680,7 +680,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                                              loff_t start, loff_t end)
 {
        struct address_space *mapping = inode->i_mapping;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t index = 0, last = ULONG_MAX;
        size_t ndirties = 0;
        int i;
@@ -694,23 +694,26 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                index = start >> PAGE_SHIFT;
                last = end >> PAGE_SHIFT;
        }
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
  repeat:
        if (unlikely(index > last) ||
-           !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
-                               PAGECACHE_TAG_DIRTY))
+             !filemap_get_folios_tag(mapping, &index, last,
+                     PAGECACHE_TAG_DIRTY, &fbatch))
                return ndirties;
 
-       for (i = 0; i < pagevec_count(&pvec); i++) {
+       for (i = 0; i < folio_batch_count(&fbatch); i++) {
                struct buffer_head *bh, *head;
-               struct page *page = pvec.pages[i];
+               struct folio *folio = fbatch.folios[i];
 
-               lock_page(page);
-               if (!page_has_buffers(page))
-                       create_empty_buffers(page, i_blocksize(inode), 0);
-               unlock_page(page);
+               folio_lock(folio);
+               head = folio_buffers(folio);
+               if (!head) {
+                       create_empty_buffers(&folio->page, i_blocksize(inode), 0);
+                       head = folio_buffers(folio);
+               }
+               folio_unlock(folio);
 
-               bh = head = page_buffers(page);
+               bh = head;
                do {
                        if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
@@ -718,13 +721,13 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                        list_add_tail(&bh->b_assoc_buffers, listp);
                        ndirties++;
                        if (unlikely(ndirties >= nlimit)) {
-                               pagevec_release(&pvec);
+                               folio_batch_release(&fbatch);
                                cond_resched();
                                return ndirties;
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
-       pagevec_release(&pvec);
+       folio_batch_release(&fbatch);
        cond_resched();
        goto repeat;
 }
-- 
2.36.1

Reply via email to