We are going to add MADV_FREE pages into a new LRU list. Add a new flag
to indicate such pages. Note, we are reusing PG_mappedtodisk for the new
flag. This is ok because no anonymous pages have this flag set.

The patch is based on Minchan's previous patch.

Cc: Michal Hocko <mho...@suse.com>
Cc: Minchan Kim <minc...@kernel.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Mel Gorman <mgor...@techsingularity.net>
Signed-off-by: Shaohua Li <s...@fb.com>
---
 fs/proc/task_mmu.c         | 8 +++++++-
 include/linux/mm_inline.h  | 5 +++++
 include/linux/page-flags.h | 6 ++++++
 mm/huge_memory.c           | 1 +
 mm/migrate.c               | 2 ++
 5 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee3efb2..813d3aa 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -440,6 +440,7 @@ struct mem_size_stats {
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
+       unsigned long lazyfree;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
        unsigned long swap;
@@ -456,8 +457,11 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
        int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;
 
-       if (PageAnon(page))
+       if (PageAnon(page)) {
                mss->anonymous += size;
+               if (PageLazyFree(page))
+                       mss->lazyfree += size;
+       }
 
        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
@@ -770,6 +774,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
+                  "LazyFree:       %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
                   "ShmemPmdMapped: %8lu kB\n"
                   "Shared_Hugetlb: %8lu kB\n"
@@ -788,6 +793,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
+                  mss.lazyfree >> 10,
                   mss.anonymous_thp >> 10,
                   mss.shmem_thp >> 10,
                   mss.shared_hugetlb >> 10,
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 0dddc2c..828e813 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,6 +22,11 @@ static inline int page_is_file_cache(struct page *page)
        return !PageSwapBacked(page);
 }
 
+static inline bool page_is_lazyfree(struct page *page)
+{
+       return PageSwapBacked(page) && PageLazyFree(page);
+}
+
 static __always_inline void __update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                int nr_pages)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b5818d..e8ea378 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -107,6 +107,9 @@ enum pageflags {
 #endif
        __NR_PAGEFLAGS,
 
+       /* MADV_FREE */
+       PG_lazyfree = PG_mappedtodisk,
+
        /* Filesystems */
        PG_checked = PG_owner_priv_1,
 
@@ -428,6 +431,9 @@ TESTPAGEFLAG_FALSE(Ksm)
 
 u64 stable_page_flags(struct page *page);
 
+PAGEFLAG(LazyFree, lazyfree, PF_ANY)
+       __CLEARPAGEFLAG(LazyFree, lazyfree, PF_ANY)
+
 static inline int PageUptodate(struct page *page)
 {
        int ret;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40bd376..ffa7ed5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1918,6 +1918,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
                         (1L << PG_swapbacked) |
                         (1L << PG_mlocked) |
                         (1L << PG_uptodate) |
+                        (1L << PG_lazyfree) |
                         (1L << PG_active) |
                         (1L << PG_locked) |
                         (1L << PG_unevictable) |
diff --git a/mm/migrate.c b/mm/migrate.c
index 502ebea..496105c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -641,6 +641,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);
+       if (PageLazyFree(page))
+               SetPageLazyFree(newpage);
 
        /* Move dirty on pages not done by migrate_page_move_mapping() */
        if (PageDirty(page))
-- 
2.9.3

Reply via email to