Convert gfs2_write_jdata_page() to gfs2_write_jdata_folio(). This adds
support for large folios and removes some accesses to page->mapping
and page->index.

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
Tested-by: Bob Peterson <rpete...@redhat.com>
Reviewed-by: Bob Peterson <rpete...@redhat.com>
---
 fs/gfs2/aops.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 749135252d52..ec5b5c1ea634 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -82,33 +82,33 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 }
 
 /**
- * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
- * @page: The page to write
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
+ * @folio: The folio to write
  * @wbc: The writeback control
  *
  * This is the same as calling block_write_full_page, but it also
  * writes pages outside of i_size
  */
-static int gfs2_write_jdata_page(struct page *page,
+static int gfs2_write_jdata_folio(struct folio *folio,
                                 struct writeback_control *wbc)
 {
-       struct inode * const inode = page->mapping->host;
+       struct inode * const inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);
-       const pgoff_t end_index = i_size >> PAGE_SHIFT;
-       unsigned offset;
 
        /*
-        * The page straddles i_size.  It must be zeroed out on each and every
+        * The folio straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
-        * the  page size, the remaining memory is zeroed when mapped, and
+        * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       offset = i_size & (PAGE_SIZE - 1);
-       if (page->index == end_index && offset)
-               zero_user_segment(page, offset, PAGE_SIZE);
+       if (folio_pos(folio) < i_size &&
+           i_size < folio_pos(folio) + folio_size(folio))
+               folio_zero_segment(folio, offset_in_folio(folio, i_size),
+                               folio_size(folio));
 
-       return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
+       return __block_write_full_page(inode, &folio->page,
+                                      gfs2_get_block_noalloc, wbc,
                                       end_buffer_async_write);
 }
 
@@ -137,7 +137,7 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
                }
                gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
        }
-       return gfs2_write_jdata_page(&folio->page, wbc);
+       return gfs2_write_jdata_folio(folio, wbc);
 }
 
 /**
-- 
2.39.2
