This removes a hidden call to compound_head() inside
__block_commit_write() and moves it to those callers which are still
page-based.  It also makes block_write_end() safe for large folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/buffer.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index f34ed29b1085..8ea9edd86519 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2116,15 +2116,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 }
 EXPORT_SYMBOL(__block_write_begin);
 
-static int __block_commit_write(struct inode *inode, struct page *page,
-               unsigned from, unsigned to)
+static int __block_commit_write(struct inode *inode, struct folio *folio,
+               size_t from, size_t to)
 {
-       unsigned block_start, block_end;
-       int partial = 0;
+       size_t block_start, block_end;
+       bool partial = false;
        unsigned blocksize;
        struct buffer_head *bh, *head;
 
-       bh = head = page_buffers(page);
+       bh = head = folio_buffers(folio);
        blocksize = bh->b_size;
 
        block_start = 0;
@@ -2132,7 +2132,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
-                               partial = 1;
+                               partial = true;
                } else {
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
@@ -2147,11 +2147,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
        /*
         * If this is a partial write which happened to make all buffers
         * uptodate then we can optimize away a bogus read_folio() for
-        * the next read(). Here we 'discover' whether the page went
+        * the next read(). Here we 'discover' whether the folio went
         * uptodate as a result of this (potentially partial) write.
         */
        if (!partial)
-               SetPageUptodate(page);
+               folio_mark_uptodate(folio);
        return 0;
 }
 
@@ -2188,10 +2188,9 @@ int block_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
 {
+       struct folio *folio = page_folio(page);
        struct inode *inode = mapping->host;
-       unsigned start;
-
-       start = pos & (PAGE_SIZE - 1);
+       size_t start = pos - folio_pos(folio);
 
        if (unlikely(copied < len)) {
                /*
@@ -2203,18 +2202,18 @@ int block_write_end(struct file *file, struct address_space *mapping,
                 * read_folio might come in and destroy our partial write.
                 *
                 * Do the simplest thing, and just treat any short write to a
-                * non uptodate page as a zero-length write, and force the
+                * non uptodate folio as a zero-length write, and force the
                 * caller to redo the whole thing.
                 */
-               if (!PageUptodate(page))
+               if (!folio_test_uptodate(folio))
                        copied = 0;
 
-               page_zero_new_buffers(page, start+copied, start+len);
+               page_zero_new_buffers(&folio->page, start+copied, start+len);
        }
-       flush_dcache_page(page);
+       flush_dcache_folio(folio);
 
        /* This could be a short (even 0-length) commit */
-       __block_commit_write(inode, page, start, start+copied);
+       __block_commit_write(inode, folio, start, start + copied);
 
        return copied;
 }
@@ -2537,8 +2536,9 @@ EXPORT_SYMBOL(cont_write_begin);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
-       struct inode *inode = page->mapping->host;
-       __block_commit_write(inode,page,from,to);
+       struct folio *folio = page_folio(page);
+       struct inode *inode = folio->mapping->host;
+       __block_commit_write(inode, folio, from, to);
        return 0;
 }
 EXPORT_SYMBOL(block_commit_write);
@@ -2586,7 +2586,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
        ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
        if (!ret)
-               ret = block_commit_write(&folio->page, 0, end);
+               ret = __block_commit_write(inode, folio, 0, end);
 
        if (unlikely(ret < 0))
                goto out_unlock;
-- 
2.39.2

Reply via email to