We write back a whole huge page at a time. Let's adjust the iteration in write_cache_pages() accordingly.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 include/linux/mm.h      |  1 +
 include/linux/pagemap.h |  1 +
 mm/page-writeback.c     | 17 ++++++++++++-----
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4424784ac374..582844ca0b23 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1045,6 +1045,7 @@ extern pgoff_t __page_file_index(struct page *page);
  */
 static inline pgoff_t page_index(struct page *page)
 {
+       page = compound_head(page);
        if (unlikely(PageSwapCache(page)))
                return __page_file_index(page);
        return page->index;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e530e7b3b6b2..faa3fa173939 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -546,6 +546,7 @@ static inline void wait_on_page_locked(struct page *page)
  */
 static inline void wait_on_page_writeback(struct page *page)
 {
+       page = compound_head(page);
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 290e8b7d3181..47d5b12c460e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2209,7 +2209,7 @@ int write_cache_pages(struct address_space *mapping,
                         * mapping. However, page->index will not change
                         * because we have a reference on the page.
                         */
-                       if (page->index > end) {
+                       if (page_to_pgoff(page) > end) {
                                /*
                                 * can't be range_cyclic (1st pass) because
                                 * end == -1 in that case.
@@ -2218,7 +2218,12 @@ int write_cache_pages(struct address_space *mapping,
                                break;
                        }
 
-                       done_index = page->index;
+                       done_index = page_to_pgoff(page);
+                       if (PageTransCompound(page)) {
+                               index = round_up(index + 1, HPAGE_PMD_NR);
+                               i += HPAGE_PMD_NR -
+                                       done_index % HPAGE_PMD_NR - 1;
+                       }
 
                        lock_page(page);
 
@@ -2230,7 +2235,7 @@ int write_cache_pages(struct address_space *mapping,
                         * even if there is now a new, dirty page at the same
                         * pagecache address.
                         */
-                       if (unlikely(page->mapping != mapping)) {
+                       if (unlikely(page_mapping(page) != mapping)) {
 continue_unlock:
                                unlock_page(page);
                                continue;
@@ -2268,7 +2273,8 @@ int write_cache_pages(struct address_space *mapping,
                                         * not be suitable for data integrity
                                         * writeout).
                                         */
-                                       done_index = page->index + 1;
+                                       done_index = compound_head(page)->index
+                                               + hpage_nr_pages(page);
                                        done = 1;
                                        break;
                                }
@@ -2280,7 +2286,8 @@ int write_cache_pages(struct address_space *mapping,
                         * keep going until we have written all the pages
                         * we tagged for writeback prior to entering this loop.
                         */
-                       if (--wbc->nr_to_write <= 0 &&
+                       wbc->nr_to_write -= hpage_nr_pages(page);
+                       if (wbc->nr_to_write <= 0 &&
                            wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
-- 
2.10.2

Reply via email to