[f2fs-dev] [PATCH v5 10/23] ext4: Convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()

2023-01-04 Thread Vishal Moola (Oracle)
Converted the function to use folios throughout. This is in preparation
for the removal of find_get_pages_range_tag(). Now supports large
folios. This change removes 11 calls to compound_head().

Signed-off-by: Vishal Moola (Oracle) 
---
 fs/ext4/inode.c | 65 -
 1 file changed, 32 insertions(+), 33 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9d9f414f99fe..fb6cd994e59a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page *page)
 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 {
struct address_space *mapping = mpd->inode->i_mapping;
-   struct pagevec pvec;
-   unsigned int nr_pages;
+   struct folio_batch fbatch;
+   unsigned int nr_folios;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
@@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
-
-   pagevec_init(&pvec);
+   folio_batch_init(&fbatch);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
-   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-   tag);
-   if (nr_pages == 0)
+   nr_folios = filemap_get_folios_tag(mapping, &index, end,
+   tag, &fbatch);
+   if (nr_folios == 0)
break;
 
-   for (i = 0; i < nr_pages; i++) {
-   struct page *page = pvec.pages[i];
+   for (i = 0; i < nr_folios; i++) {
+   struct folio *folio = fbatch.folios[i];
 
/*
 * Accumulated enough dirty pages? This doesn't apply
@@ -2635,10 +2634,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
goto out;
 
/* If we can't merge this page, we are done. */
-   if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+   if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
goto out;
 
-   lock_page(page);
+   folio_lock(folio);
/*
 * If the page is no longer dirty, or its mapping no
 * longer corresponds to inode we are writing (which
@@ -2646,16 +2645,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 * page is already under writeback and we are not doing
 * a data integrity writeback, skip the page
 */
-   if (!PageDirty(page) ||
-   (PageWriteback(page) &&
+   if (!folio_test_dirty(folio) ||
+   (folio_test_writeback(folio) &&
 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
-   unlikely(page->mapping != mapping)) {
-   unlock_page(page);
+   unlikely(folio->mapping != mapping)) {
+   folio_unlock(folio);
continue;
}
 
-   wait_on_page_writeback(page);
-   BUG_ON(PageWriteback(page));
+   folio_wait_writeback(folio);
+   BUG_ON(folio_test_writeback(folio));
 
/*
 * Should never happen but for buggy code in
@@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 *
 * [1] https://lore.kernel.org/linux-mm/20180103100430.ge4...@quack2.suse.cz
 */
-   if (!page_has_buffers(page)) {
-   ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
-   ClearPageDirty(page);
-   unlock_page(page);
+   if (!folio_buffers(folio)) {
+   ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
+   folio_clear_dirty(folio);
+   folio_unlock(folio);
continue;
}
 
if (mpd->map.m_len == 0)
-   mpd->first_page = page->index;
-   mpd->next_page = page->index + 1;
+   mpd->first_page = folio->index;
+   mpd->next_page =

Re: [f2fs-dev] [PATCH v5 10/23] ext4: Convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()

2023-01-12 Thread Vishal Moola
On Wed, Jan 4, 2023 at 1:15 PM Vishal Moola (Oracle)
 wrote:
>
> Converted the function to use folios throughout. This is in preparation
> for the removal of find_get_pages_range_tag(). Now supports large
> folios. This change removes 11 calls to compound_head().
>
> Signed-off-by: Vishal Moola (Oracle) 
> ---
>  fs/ext4/inode.c | 65 -
>  1 file changed, 32 insertions(+), 33 deletions(-)
>
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index 9d9f414f99fe..fb6cd994e59a 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page *page)
>  static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  {
> struct address_space *mapping = mpd->inode->i_mapping;
> -   struct pagevec pvec;
> -   unsigned int nr_pages;
> +   struct folio_batch fbatch;
> +   unsigned int nr_folios;
> long left = mpd->wbc->nr_to_write;
> pgoff_t index = mpd->first_page;
> pgoff_t end = mpd->last_page;
> @@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> tag = PAGECACHE_TAG_TOWRITE;
> else
> tag = PAGECACHE_TAG_DIRTY;
> -
> -   pagevec_init(&pvec);
> +   folio_batch_init(&fbatch);
> mpd->map.m_len = 0;
> mpd->next_page = index;
> while (index <= end) {
> -   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
> -   tag);
> -   if (nr_pages == 0)
> +   nr_folios = filemap_get_folios_tag(mapping, &index, end,
> +   tag, &fbatch);
> +   if (nr_folios == 0)
> break;
>
> -   for (i = 0; i < nr_pages; i++) {
> -   struct page *page = pvec.pages[i];
> +   for (i = 0; i < nr_folios; i++) {
> +   struct folio *folio = fbatch.folios[i];
>
> /*
>  * Accumulated enough dirty pages? This doesn't apply
> @@ -2635,10 +2634,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
> goto out;
>
> /* If we can't merge this page, we are done. */
> -   if (mpd->map.m_len > 0 && mpd->next_page != page->index)
> +   if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
> goto out;
>
> -   lock_page(page);
> +   folio_lock(folio);
> /*
>  * If the page is no longer dirty, or its mapping no
>  * longer corresponds to inode we are writing (which
> @@ -2646,16 +2645,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  * page is already under writeback and we are not doing
>  * a data integrity writeback, skip the page
>  */
> -   if (!PageDirty(page) ||
> -   (PageWriteback(page) &&
> +   if (!folio_test_dirty(folio) ||
> +   (folio_test_writeback(folio) &&
>  (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
> -   unlikely(page->mapping != mapping)) {
> -   unlock_page(page);
> +   unlikely(folio->mapping != mapping)) {
> +   folio_unlock(folio);
> continue;
> }
>
> -   wait_on_page_writeback(page);
> -   BUG_ON(PageWriteback(page));
> +   folio_wait_writeback(folio);
> +   BUG_ON(folio_test_writeback(folio));
>
> /*
>  * Should never happen but for buggy code in
> @@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
>  *
>  * [1] https://lore.kernel.org/linux-mm/20180103100430.ge4...@quack2.suse.cz
>  */
> -   if (!page_has_buffers(page)) {
> -   ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
> -   ClearPageDirty(page);
> -   unlock_page(page);
> +   if (!folio_buffers(folio)) {
> +   ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
> +   folio_clear_dirty(folio);
> +   folio_unlock(folio);
> continue;
> }
>