This patch adds a KPF_ZERO_PAGE flag for the zero page, so that a userspace process can recognize the zero page via /proc/kpageflags and do memory analysis more accurately.
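For illustration only (not part of the patch itself), below is a minimal userspace sketch of how a tool might test the new bit. It assumes the pagemap interface described in Documentation/vm/pagemap.txt (one u64 per virtual page in /proc/self/pagemap with the PFN in bits 0-54, and one u64 of KPF_* bits per PFN in /proc/kpageflags); reading PFNs may require root, and error handling is omitted:

/* Sketch only: maps an untouched anonymous page (which a read fault backs
 * with the shared zero page), looks up its PFN, and checks KPF_ZERO_PAGE. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define KPF_ZERO_PAGE	24	/* bit number added by this patch */

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);

	/* a read fault on untouched anonymous memory maps the zero page */
	char *p = mmap(NULL, psize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS,
		       -1, 0);
	volatile char c = p[0];
	(void)c;

	/* /proc/self/pagemap: one u64 per virtual page, PFN in bits 0-54 */
	int pagemap = open("/proc/self/pagemap", O_RDONLY);
	uint64_t ent = 0;
	pread(pagemap, &ent, sizeof(ent),
	      (uint64_t)((uintptr_t)p / psize) * sizeof(ent));
	uint64_t pfn = ent & ((1ULL << 55) - 1);

	/* /proc/kpageflags: one u64 of KPF_* bits per PFN */
	int kpf = open("/proc/kpageflags", O_RDONLY);
	uint64_t flags = 0;
	pread(kpf, &flags, sizeof(flags), pfn * sizeof(flags));

	printf("KPF_ZERO_PAGE = %d\n", (int)((flags >> KPF_ZERO_PAGE) & 1));

	close(kpf);
	close(pagemap);
	return 0;
}

Note that with this patch applied, the huge zero page reports both KPF_THP and KPF_ZERO_PAGE, so such a tool can also distinguish it from ordinary transparent huge pages.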
Signed-off-by: Yalin Wang <yalin.w...@sonymobile.com>
---
 fs/proc/page.c                         | 16 +++++++++++++---
 include/linux/huge_mm.h                | 12 ++++++++++++
 include/uapi/linux/kernel-page-flags.h |  1 +
 mm/huge_memory.c                       |  7 +------
 4 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/fs/proc/page.c b/fs/proc/page.c
index 1e3187d..7eee2d8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -5,6 +5,7 @@
 #include <linux/ksm.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/huge_mm.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/hugetlb.h>
@@ -121,9 +122,18 @@ u64 stable_page_flags(struct page *page)
 	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
 	 * to make sure a given page is a thp, not a non-huge compound page.
 	 */
-	else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
-					     PageAnon(compound_head(page))))
-		u |= 1 << KPF_THP;
+	else if (PageTransCompound(page)) {
+		struct page *head = compound_head(page);
+
+		if (PageLRU(head) || PageAnon(head))
+			u |= 1 << KPF_THP;
+		else if (is_huge_zero_page(head)) {
+			u |= 1 << KPF_ZERO_PAGE;
+			u |= 1 << KPF_THP;
+		}
+	} else if (is_zero_pfn(page_to_pfn(page)))
+		u |= 1 << KPF_ZERO_PAGE;
+

 	/*
 	 * Caveats on high order pages: page->_count will only be set
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ad9051b..f10b20f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page)
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

+extern struct page *huge_zero_page;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+	return ACCESS_ONCE(huge_zero_page) == page;
+}
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
 	return 0;
 }

+static inline bool is_huge_zero_page(struct page *page)
+{
+	return false;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #endif /* _LINUX_HUGE_MM_H */
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 2f96d23..a6c4962 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -32,6 +32,7 @@
 #define KPF_KSM			21
 #define KPF_THP			22
 #define KPF_BALLOON		23
+#define KPF_ZERO_PAGE		24


 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de98415..d7bc7a5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -171,12 +171,7 @@ static int start_khugepaged(void)
 }

 static atomic_t huge_zero_refcount;
-static struct page *huge_zero_page __read_mostly;
-
-static inline bool is_huge_zero_page(struct page *page)
-{
-	return ACCESS_ONCE(huge_zero_page) == page;
-}
+struct page *huge_zero_page __read_mostly;

 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
--
2.1.3