During fork, the page tables are copied from parent to child.  A PMD
swap mapping needs to be copied too, and the reference count of the
swap entries it maps needs to be increased.
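
Condensed, the copy path added below bumps the swap count of all
HPAGE_PMD_NR swap entries behind the PMD, then installs the same swap
PMD in the child (sketch only; error handling and the mmlist update
are omitted, and swap_duplicate() is this series' variant that takes
an entry count):

	swp_entry_t entry = pmd_to_swp_entry(pmd);

	if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry) &&
	    !swap_duplicate(&entry, HPAGE_PMD_NR)) {
		add_mm_counter(dst_mm, MM_SWAPENTS, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		/* deposit a page table for a later split of the PMD */
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		/* child now maps the same huge swap cluster */
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	}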

If the huge swap cluster has already been split, we need to split the
PMD swap mapping as well and fall back to PTE copying.
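
In the hunk below this is the -ENOTDIR case of swap_duplicate(): the
parent's PMD swap mapping is split in place and the pre-allocated page
table is freed, so the non-zero return value makes copy_pmd_range()
fall through to copy_pte_range() (condensed from the patch):

	} else if (ret == -ENOTDIR) {
		/* the huge swap cluster no longer exists */
		__split_huge_swap_pmd(vma, addr, src_pmd);
		pte_free(dst_mm, pgtable);
	}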

If swap count continuation fails to allocate a page with GFP_ATOMIC,
we need to drop the page table locks and try again with GFP_KERNEL.
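
This is visible in the -ENOMEM branch below: swap_duplicate() can only
try GFP_ATOMIC for the continuation page because the page table locks
are held, so on failure we drop both PTLs, allocate the continuation
page with GFP_KERNEL, and restart the copy from the new retry: label
(condensed from the patch):

	} else if (ret == -ENOMEM) {
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		/* may sleep: both page table locks are dropped */
		ret = add_swap_count_continuation(entry, GFP_KERNEL);
		if (ret < 0) {
			ret = -ENOMEM;
			pte_free(dst_mm, pgtable);
			goto out;
		}
		goto retry;	/* relock and re-read *src_pmd */
	}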

Signed-off-by: "Huang, Ying" <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Daniel Jordan <[email protected]>
---
 mm/huge_memory.c | 72 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 57 insertions(+), 15 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3aade329fe8b..2a49b2068902 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -985,6 +985,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (unlikely(!pgtable))
                goto out;
 
+retry:
        dst_ptl = pmd_lock(dst_mm, dst_pmd);
        src_ptl = pmd_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -992,26 +993,67 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        ret = -EAGAIN;
        pmd = *src_pmd;
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        if (unlikely(is_swap_pmd(pmd))) {
                swp_entry_t entry = pmd_to_swp_entry(pmd);
 
-               VM_BUG_ON(!is_pmd_migration_entry(pmd));
-               if (is_write_migration_entry(entry)) {
-                       make_migration_entry_read(&entry);
-                       pmd = swp_entry_to_pmd(entry);
-                       if (pmd_swp_soft_dirty(*src_pmd))
-                               pmd = pmd_swp_mksoft_dirty(pmd);
-                       set_pmd_at(src_mm, addr, src_pmd, pmd);
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+               if (is_migration_entry(entry)) {
+                       if (is_write_migration_entry(entry)) {
+                               make_migration_entry_read(&entry);
+                               pmd = swp_entry_to_pmd(entry);
+                               if (pmd_swp_soft_dirty(*src_pmd))
+                                       pmd = pmd_swp_mksoft_dirty(pmd);
+                               set_pmd_at(src_mm, addr, src_pmd, pmd);
+                       }
+                       add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+                       mm_inc_nr_ptes(dst_mm);
+                       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+                       set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+                       ret = 0;
+                       goto out_unlock;
                }
-               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-               mm_inc_nr_ptes(dst_mm);
-               pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
-               set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-               ret = 0;
-               goto out_unlock;
-       }
 #endif
+               if (IS_ENABLED(CONFIG_THP_SWAP) && !non_swap_entry(entry)) {
+                       ret = swap_duplicate(&entry, HPAGE_PMD_NR);
+                       if (!ret) {
+                               add_mm_counter(dst_mm, MM_SWAPENTS,
+                                              HPAGE_PMD_NR);
+                               mm_inc_nr_ptes(dst_mm);
+                               pgtable_trans_huge_deposit(dst_mm, dst_pmd,
+                                                          pgtable);
+                               set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
+                       } else if (ret == -ENOTDIR) {
+                               /*
+                                * The huge swap cluster has been split; split
+                                * the PMD swap mapping and fall back to PTEs.
+                                */
+                               __split_huge_swap_pmd(vma, addr, src_pmd);
+                               pte_free(dst_mm, pgtable);
+                       } else if (ret == -ENOMEM) {
+                               spin_unlock(src_ptl);
+                               spin_unlock(dst_ptl);
+                               ret = add_swap_count_continuation(entry,
+                                                                 GFP_KERNEL);
+                               if (ret < 0) {
+                                       ret = -ENOMEM;
+                                       pte_free(dst_mm, pgtable);
+                                       goto out;
+                               }
+                               goto retry;
+                       } else
+                               VM_BUG_ON(1);
+                       goto out_unlock;
+               }
+               VM_BUG_ON(1);
+       }
 
        if (unlikely(!pmd_trans_huge(pmd))) {
                pte_free(dst_mm, pgtable);
-- 
2.18.1
