From: Huang Ying <[email protected]>

During MADV_WILLNEED, for a PMD swap mapping, if THP swapin is enabled
for the VMA, the whole swap cluster will be swapped in.  Otherwise, the
huge swap cluster and the PMD swap mapping will be split and fall back
to PTE swap mapping.

Signed-off-by: "Huang, Ying" <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Zi Yan <[email protected]>
---
 mm/madvise.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index e03e85a20fb4..44a0a62f4848 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -196,14 +196,36 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
        pte_t *orig_pte;
        struct vm_area_struct *vma = walk->private;
        unsigned long index;
+       swp_entry_t entry;
+       struct page *page;
+       pmd_t pmdval;
+
+       pmdval = *pmd;
+       if (thp_swap_supported() && is_swap_pmd(pmdval) &&
+           !is_pmd_migration_entry(pmdval)) {
+               entry = pmd_to_swp_entry(pmdval);
+               if (!transparent_hugepage_swapin_enabled(vma)) {
+                       if (!split_swap_cluster(entry, false))
+                               split_huge_swap_pmd(vma, pmd, start, pmdval);
+               } else {
+                       page = read_swap_cache_async(entry,
+                                                    GFP_HIGHUSER_MOVABLE,
+                                                    vma, start, false);
+                       /* The swap cluster has been split under us */
+                       if (page) {
+                               if (!PageTransHuge(page))
+                                       split_huge_swap_pmd(vma, pmd, start,
+                                                           pmdval);
+                               put_page(page);
+                       }
+               }
+       }
 
        if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                return 0;
 
        for (index = start; index != end; index += PAGE_SIZE) {
                pte_t pte;
-               swp_entry_t entry;
-               struct page *page;
                spinlock_t *ptl;
 
                orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
-- 
2.17.0

Reply via email to