reclaim_clean_pages_from_list() decreases NR_ISOLATED_FILE by the value
returned from shrink_page_list(). But mlocked pages in the isolated
clean_pages list are removed from the list without being counted in
nr_reclaimed. Fix this miscounting by returning the number of mlocked
pages and adding it to the reclaimed count.

Signed-off-by: Jaewon Kim <jaewon31....@samsung.com>
---
 mm/vmscan.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e8eadd..5837695 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -849,6 +849,7 @@ static unsigned long shrink_page_list(struct list_head 
*page_list,
                                      unsigned long *ret_nr_congested,
                                      unsigned long *ret_nr_writeback,
                                      unsigned long *ret_nr_immediate,
+                                     unsigned long *ret_nr_mlocked,
                                      bool force_reclaim)
 {
        LIST_HEAD(ret_pages);
@@ -1158,6 +1159,7 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
+               (*ret_nr_mlocked)++;
                continue;
 
 activate_locked:
@@ -1197,6 +1199,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone 
*zone,
                .may_unmap = 1,
        };
        unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+       unsigned long nr_mlocked = 0;
        struct page *page, *next;
        LIST_HEAD(clean_pages);
 
@@ -1210,8 +1213,10 @@ unsigned long reclaim_clean_pages_from_list(struct zone 
*zone,
 
        ret = shrink_page_list(&clean_pages, zone, &sc,
                        TTU_UNMAP|TTU_IGNORE_ACCESS,
-                       &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+                       &dummy1, &dummy2, &dummy3, &dummy4, &dummy5,
+                       &nr_mlocked, true);
        list_splice(&clean_pages, page_list);
+       ret += nr_mlocked;
        mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
        return ret;
 }
@@ -1523,6 +1528,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct 
lruvec *lruvec,
        unsigned long nr_unqueued_dirty = 0;
        unsigned long nr_writeback = 0;
        unsigned long nr_immediate = 0;
+       unsigned long nr_mlocked = 0;
        isolate_mode_t isolate_mode = 0;
        int file = is_file_lru(lru);
        struct zone *zone = lruvec_zone(lruvec);
@@ -1565,7 +1571,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct 
lruvec *lruvec,
 
        nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
                                &nr_dirty, &nr_unqueued_dirty, &nr_congested,
-                               &nr_writeback, &nr_immediate,
+                               &nr_writeback, &nr_immediate, &nr_mlocked,
                                false);
 
        spin_lock_irq(&zone->lru_lock);
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to