For a PMD swap mapping, zap_huge_pmd() will clear the PMD and call
free_swap_and_cache() to decrease the swap reference count and maybe
free or split the huge swap cluster and the THP in swap cache.

Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
---
 mm/huge_memory.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a49b2068902..c2b23dfb0d55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2072,7 +2072,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                spin_unlock(ptl);
                if (is_huge_zero_pmd(orig_pmd))
                        tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
-       } else if (is_huge_zero_pmd(orig_pmd)) {
+       } else if (pmd_present(orig_pmd) && is_huge_zero_pmd(orig_pmd)) {
                zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
                tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
@@ -2085,17 +2085,27 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        page_remove_rmap(page, true);
                        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        VM_BUG_ON_PAGE(!PageHead(page), page);
-               } else if (thp_migration_supported()) {
-                       swp_entry_t entry;
-
-                       VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
-                       entry = pmd_to_swp_entry(orig_pmd);
-                       page = pfn_to_page(swp_offset(entry));
+               } else {
+                       swp_entry_t entry = pmd_to_swp_entry(orig_pmd);
+
+                       if (thp_migration_supported() &&
+                           is_migration_entry(entry))
+                               page = pfn_to_page(swp_offset(entry));
+                       else if (IS_ENABLED(CONFIG_THP_SWAP) &&
+                                !non_swap_entry(entry))
+                               free_swap_and_cache(entry, HPAGE_PMD_NR);
+                       else {
+                               WARN_ONCE(1,
+"Non present huge pmd without pmd migration or swap enabled!");
+                               goto unlock;
+                       }
                        flush_needed = 0;
-               } else
-                       WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+               }
 
-               if (PageAnon(page)) {
+               if (!page) {
+                       zap_deposited_table(tlb->mm, pmd);
+                       add_mm_counter(tlb->mm, MM_SWAPENTS, -HPAGE_PMD_NR);
+               } else if (PageAnon(page)) {
                        zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                } else {
@@ -2103,7 +2113,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
                }
-
+unlock:
                spin_unlock(ptl);
                if (flush_needed)
                        tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
-- 
2.18.1

Reply via email to