Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also, make
__pagevec_lru_add_fn() use the irq-unsafe alternative to update the
stats, as the irqs are already disabled.

Signed-off-by: Shakeel Butt <shake...@google.com>
---
 mm/swap.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index a37bd7b202ac..3dbef6517cac 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -225,7 +225,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-               (*pgmoved)++;
+               (*pgmoved) += hpage_nr_pages(page);
        }
 }
 
@@ -285,7 +285,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);
 
-               __count_vm_event(PGACTIVATE);
+               __count_vm_events(PGACTIVATE, hpage_nr_pages(page));
                update_page_reclaim_stat(lruvec, file, 1);
        }
 }
@@ -503,6 +503,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
        int lru, file;
        bool active;
+       int nr_pages = hpage_nr_pages(page);
 
        if (!PageLRU(page))
                return;
@@ -536,11 +537,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                 * We moves tha page into tail of inactive.
                 */
                add_page_to_lru_list_tail(page, lruvec, lru);
-               __count_vm_event(PGROTATED);
+               __count_vm_events(PGROTATED, nr_pages);
        }
 
        if (active)
-               __count_vm_event(PGDEACTIVATE);
+               __count_vm_events(PGDEACTIVATE, nr_pages);
        update_page_reclaim_stat(lruvec, file, 0);
 }
 
@@ -929,6 +930,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
+       int nr_pages = hpage_nr_pages(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -966,13 +968,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                update_page_reclaim_stat(lruvec, page_is_file_lru(page),
                                         PageActive(page));
                if (was_unevictable)
-                       count_vm_event(UNEVICTABLE_PGRESCUED);
+                       __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                lru = LRU_UNEVICTABLE;
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
-                       count_vm_event(UNEVICTABLE_PGCULLED);
+                       __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }
 
        add_page_to_lru_list(page, lruvec, lru);
-- 
2.26.2.645.ge9eca65c58-goog

Reply via email to