From: Matthew Wilcox <mawil...@microsoft.com>

Convert a couple of short tagged-iteration loops in
inode_switch_wbs_work_fn() from the radix tree API to the XArray API.
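
The XArray iterator hands the page back directly, so the
radix_tree_deref_slot_protected() call and the NULL check fall away.
For reference, a rough sketch of the pattern used below (the ULONG_MAX
bound and the xas_set() rewind mirror the hunks that follow):

	XA_STATE(xas, &mapping->pages, 0);
	struct page *page;

	xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
		/* act on each dirty page */
	}

	xas_set(&xas, 0);	/* rewind before walking the writeback tag */
	xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
		/* act on each page under writeback */
	}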

Signed-off-by: Matthew Wilcox <mawil...@microsoft.com>
---
 fs/fs-writeback.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a3c2352507f6..18ad86ccba96 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -339,9 +339,9 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
        struct address_space *mapping = inode->i_mapping;
        struct bdi_writeback *old_wb = inode->i_wb;
        struct bdi_writeback *new_wb = isw->new_wb;
-       struct radix_tree_iter iter;
+       XA_STATE(xas, &mapping->pages, 0);
+       struct page *page;
        bool switched = false;
-       void **slot;
 
        /*
         * By the time control reaches here, RCU grace period has passed
@@ -373,27 +373,20 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
        /*
         * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
         * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-        * pages actually under underwriteback.
+        * pages actually under writeback.
         */
-       radix_tree_for_each_tagged(slot, &mapping->pages, &iter, 0,
-                                  PAGECACHE_TAG_DIRTY) {
-               struct page *page = radix_tree_deref_slot_protected(slot,
-                                               &mapping->pages.xa_lock);
-               if (likely(page) && PageDirty(page)) {
+       xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+               if (PageDirty(page)) {
                        dec_wb_stat(old_wb, WB_RECLAIMABLE);
                        inc_wb_stat(new_wb, WB_RECLAIMABLE);
                }
        }
 
-       radix_tree_for_each_tagged(slot, &mapping->pages, &iter, 0,
-                                  PAGECACHE_TAG_WRITEBACK) {
-               struct page *page = radix_tree_deref_slot_protected(slot,
-                                               &mapping->pages.xa_lock);
-               if (likely(page)) {
-                       WARN_ON_ONCE(!PageWriteback(page));
-                       dec_wb_stat(old_wb, WB_WRITEBACK);
-                       inc_wb_stat(new_wb, WB_WRITEBACK);
-               }
+       xas_set(&xas, 0);
+       xas_for_each_tag(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+               WARN_ON_ONCE(!PageWriteback(page));
+               dec_wb_stat(old_wb, WB_WRITEBACK);
+               inc_wb_stat(new_wb, WB_WRITEBACK);
        }
 
        wb_get(new_wb);
-- 
2.15.0
