Extract the writeback extension code into its own function to break up the
writeback function a bit.
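
The extracted helper communicates through pointer in/out parameters:
afs_extend_writeback() reads the caller's running count/offset/to state
through *_count, *_offset and *_to and writes the updated values back
before returning.  As a minimal user-space sketch of that pattern (the
names and the trivial dirty-slot scan below are illustrative only, not
the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the extracted helper: extend a region across
	 * subsequent contiguously dirty slots, updating the caller's
	 * running state through pointers in the way afs_extend_writeback()
	 * updates *_count and *_to.
	 */
	static void extend_region(const bool *dirty, unsigned nr,
				  unsigned start, unsigned long *_count,
				  unsigned *_to)
	{
		unsigned long count = *_count;
		unsigned to = *_to;

		while (start < nr && dirty[start]) {
			count++;
			to = start + 1;
			start++;
		}

		*_count = count;
		*_to = to;
	}

	int main(void)
	{
		bool dirty[] = { true, true, true, false, true };
		unsigned long count = 1;	/* the primary slot */
		unsigned to = 1;

		extend_region(dirty, 5, 1, &count, &to);
		printf("count=%lu to=%u\n", count, to); /* prints count=3 to=3 */
		return 0;
	}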

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: linux-...@lists.infradead.org
cc: linux-cach...@redhat.com
cc: linux-fsde...@vger.kernel.org
Link: https://lore.kernel.org/r/160588538471.3465195.782513375683399583.st...@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161118154610.1232039.1765365632920504822.st...@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161050546.2537118.2202554806419189453.st...@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340414102.1303470.9078891484034668985.st...@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539558417.286939.2879469588895925399.st...@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653813972.2770958.12671731209438112378.st...@warthog.procyon.org.uk/ # v5
---

 fs/afs/write.c |  109 ++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 67 insertions(+), 42 deletions(-)

diff --git a/fs/afs/write.c b/fs/afs/write.c
index 1b8cabf5ac92..4ccd2c263983 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -490,47 +490,25 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter,
 }
 
 /*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
  */
-static int afs_write_back_from_locked_page(struct address_space *mapping,
-                                          struct writeback_control *wbc,
-                                          struct page *primary_page,
-                                          pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+                                struct afs_vnode *vnode,
+                                unsigned long *_count,
+                                pgoff_t start,
+                                pgoff_t final_page,
+                                unsigned *_offset,
+                                unsigned *_to,
+                                bool new_content)
 {
-       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-       struct iov_iter iter;
        struct page *pages[8], *page;
-       unsigned long count, priv;
-       unsigned n, offset, to, f, t;
-       pgoff_t start, first, last;
-       loff_t i_size, pos, end;
-       int loop, ret;
-
-       _enter(",%lx", primary_page->index);
-
-       count = 1;
-       if (test_set_page_writeback(primary_page))
-               BUG();
-
-       /* Find all consecutive lockable dirty pages that have contiguous
-        * written regions, stopping when we find a page that is not
-        * immediately lockable, is not dirty or is missing, or we reach the
-        * end of the range.
-        */
-       start = primary_page->index;
-       priv = page_private(primary_page);
-       offset = afs_page_dirty_from(primary_page, priv);
-       to = afs_page_dirty_to(primary_page, priv);
-       trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
-       WARN_ON(offset == to);
-       if (offset == to)
-               trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
-       if (start >= final_page ||
-           (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
-               goto no_more;
+       unsigned long count = *_count, priv;
+       unsigned offset = *_offset, to = *_to, n, f, t;
+       int loop;
 
        start++;
        do {
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 
                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
-                       if (to != PAGE_SIZE &&
-                           !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+                       if (to != PAGE_SIZE && !new_content)
                                break;
                        if (page->index > final_page)
                                break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
                        priv = page_private(page);
                        f = afs_page_dirty_from(page, priv);
                        t = afs_page_dirty_to(page, priv);
-                       if (f != 0 &&
-                           !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+                       if (f != 0 && !new_content) {
                                unlock_page(page);
                                break;
                        }
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
        } while (start <= final_page && count < 65536);
 
 no_more:
+       *_count = count;
+       *_offset = offset;
+       *_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+                                          struct writeback_control *wbc,
+                                          struct page *primary_page,
+                                          pgoff_t final_page)
+{
+       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+       struct iov_iter iter;
+       unsigned long count, priv;
+       unsigned offset, to;
+       pgoff_t start, first, last;
+       loff_t i_size, pos, end;
+       bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+       int ret;
+
+       _enter(",%lx", primary_page->index);
+
+       count = 1;
+       if (test_set_page_writeback(primary_page))
+               BUG();
+
+       /* Find all consecutive lockable dirty pages that have contiguous
+        * written regions, stopping when we find a page that is not
+        * immediately lockable, is not dirty or is missing, or we reach the
+        * end of the range.
+        */
+       start = primary_page->index;
+       priv = page_private(primary_page);
+       offset = afs_page_dirty_from(primary_page, priv);
+       to = afs_page_dirty_to(primary_page, priv);
+       trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+       WARN_ON(offset == to);
+       if (offset == to)
+               trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+       if (start < final_page &&
+           (to == PAGE_SIZE || new_content))
+               afs_extend_writeback(mapping, vnode, &count, start, final_page,
+                                    &offset, &to, new_content);
+
        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.

