We have been reclaiming pages per zone, but an upcoming patch will
pass pages from multiple zones into shrink_page_list(), so this patch
prepares for it.

Signed-off-by: Minchan Kim <[email protected]>
---
 mm/vmscan.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 05119983c92e..d20c9e863d35 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -881,7 +881,6 @@ static void page_check_dirty_writeback(struct page *page,
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                     struct zone *zone,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
                                      unsigned long *ret_nr_dirty,
@@ -910,6 +909,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                bool dirty, writeback;
                bool lazyfree = false;
                int ret = SWAP_SUCCESS;
+               struct zone *zone;
 
                cond_resched();
 
@@ -919,8 +919,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (!trylock_page(page))
                        goto keep;
 
+               zone = page_zone(page);
                VM_BUG_ON_PAGE(PageActive(page), page);
-               VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
                sc->nr_scanned++;
 
@@ -933,6 +933,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
+               mapping = page_mapping(page);
                if (sc->force_reclaim)
                        goto force_reclaim;
 
@@ -958,7 +959,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * pages marked for immediate reclaim are making it to the
                 * end of the LRU a second time.
                 */
-               mapping = page_mapping(page);
                if (((dirty || writeback) && mapping &&
                     inode_write_congested(mapping->host)) ||
                    (writeback && PageReclaim(page)))
@@ -1272,7 +1272,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
                }
        }
 
-       ret = shrink_page_list(&clean_pages, zone, &sc,
+       ret = shrink_page_list(&clean_pages, &sc,
                        TTU_UNMAP|TTU_IGNORE_ACCESS,
                        &dummy1, &dummy2, &dummy3, &dummy4, &dummy5);
        list_splice(&clean_pages, page_list);
@@ -1627,7 +1627,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        if (nr_taken == 0)
                return 0;
 
-       nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+       nr_reclaimed = shrink_page_list(&page_list, sc, TTU_UNMAP,
                                &nr_dirty, &nr_unqueued_dirty, &nr_congested,
                                &nr_writeback, &nr_immediate);
 
-- 
1.9.1

Reply via email to