Merge PTEs back into a huge PMD mapping if the PMD has ever been
split. For now, only gigantic pages whose vmemmap size is an integer
multiple of PMD_SIZE are supported, which is the simplest case to
handle.
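
To illustrate the alignment requirement, the sketch below walks
through the arithmetic for a 1 GiB gigantic page. It is only a
userspace model under assumed x86-64 defaults (4 KiB base pages,
2 MiB PMDs, 64-byte struct page); nr_vmemmap_pages and pages_per_pmd
merely mimic the kernel's nr_vmemmap() and VMEMMAP_HPAGE_NR, they are
not the real helpers.

  #include <stdio.h>
  #include <stdbool.h>

  /* Assumed x86-64 values; not read from kernel headers. */
  #define BASE_PAGE_SIZE    4096UL        /* PAGE_SIZE */
  #define PMD_MAP_SIZE      (2UL << 20)   /* PMD_SIZE / VMEMMAP_HPAGE_SIZE */
  #define STRUCT_PAGE_SIZE  64UL          /* sizeof(struct page) */

  int main(void)
  {
          /* A 1 GiB gigantic page consists of 262144 base pages. */
          unsigned long gigantic_size = 1UL << 30;
          unsigned long nr_base_pages = gigantic_size / BASE_PAGE_SIZE;

          /* One struct page per base page gives a 16 MiB vmemmap here. */
          unsigned long vmemmap_bytes = nr_base_pages * STRUCT_PAGE_SIZE;
          unsigned long nr_vmemmap_pages = vmemmap_bytes / BASE_PAGE_SIZE;
          unsigned long pages_per_pmd = PMD_MAP_SIZE / BASE_PAGE_SIZE;

          /*
           * Models IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR): 4096 vmemmap
           * pages split evenly into 8 PMD-sized chunks, so the split PMDs
           * can be merged back into huge mappings.
           */
          bool mergeable = (nr_vmemmap_pages % pages_per_pmd) == 0;

          printf("vmemmap pages: %lu, per PMD: %lu, mergeable: %s\n",
                 nr_vmemmap_pages, pages_per_pmd, mergeable ? "yes" : "no");
          return 0;
  }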

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 arch/x86/include/asm/hugetlb.h |   8 +++
 include/linux/hugetlb.h        |   7 +++
 mm/hugetlb.c                   | 106 ++++++++++++++++++++++++++++++++-
 3 files changed, 119 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 7c3eb60c2198..9f9e19dd0578 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -15,6 +15,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 {
        return pmd_large(*pmd);
 }
+
+#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+       pte_t entry = pfn_pte(page_to_pfn(page), PAGE_KERNEL_LARGE);
+
+       return __pmd(pte_val(entry));
+}
 #endif
 
 #define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 695d3041ae7d..3a45199cc5c1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -611,6 +611,13 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 }
 #endif
 
+#ifndef vmemmap_pmd_mkhuge
+static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
+{
+       return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
+}
+#endif
+
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT            PMD_SHIFT
 #endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82467d573fee..a526bcdb137b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1718,6 +1718,62 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
        }
 }
 
+static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
+                                           unsigned int nr, struct page *huge,
+                                           struct list_head *free_pages)
+{
+       unsigned long addr;
+       unsigned long end = start + (nr << PAGE_SHIFT);
+
+       for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+               struct page *page;
+               pte_t old = *ptep;
+               pte_t entry;
+
+               prepare_vmemmap_page(huge);
+
+               entry = mk_pte(huge++, PAGE_KERNEL);
+               VM_WARN_ON(!pte_present(old));
+               page = pte_page(old);
+               list_add(&page->lru, free_pages);
+
+               set_pte_at(&init_mm, addr, ptep, entry);
+       }
+}
+
+static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+                                         struct page *huge,
+                                         struct list_head *free_pages)
+{
+       unsigned long end = start + VMEMMAP_HPAGE_SIZE;
+
+       flush_cache_vunmap(start, end);
+       __replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
+                                       VMEMMAP_HPAGE_NR, huge, free_pages);
+       flush_tlb_kernel_range(start, end);
+}
+
+static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
+{
+       pte_t *pte;
+       struct page *page;
+
+       pte = pte_offset_kernel(pmdp, addr);
+       page = pte_page(*pte);
+       set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
+
+       return pte;
+}
+
+static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+                                       struct page *huge,
+                                       struct list_head *free_pages)
+{
+       replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
+       pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
+       flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
+}
+
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
        int i;
@@ -1731,6 +1787,15 @@ static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
        }
 }
 
+static inline void dissolve_compound_page(struct page *page, unsigned int order)
+{
+       int i;
+       unsigned int nr_pages = 1 << order;
+
+       for (i = 1; i < nr_pages; i++)
+               set_page_refcounted(page + i);
+}
+
 static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
        pmd_t *pmd;
@@ -1750,10 +1815,47 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
                                    __remap_huge_page_pte_vmemmap);
        if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
                /*
-                * Todo:
-                * Merge pte to huge pmd if it has ever been split.
+                * Merge PTEs back into a huge PMD if it has ever been
+                * split. For now, only gigantic pages whose vmemmap size
+                * is an integer multiple of PMD_SIZE are supported; this
+                * is the simplest case to handle.
                 */
                clear_pmd_split(pmd);
+
+               if (IS_ALIGNED(nr_vmemmap(h), VMEMMAP_HPAGE_NR)) {
+                       unsigned long addr = (unsigned long)head;
+                       unsigned long end = addr + nr_vmemmap_size(h);
+
+                       spin_unlock(ptl);
+
+                       for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
+                               void *to;
+                               struct page *page;
+
+                               page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
+                                                  VMEMMAP_HPAGE_ORDER);
+                               if (!page)
+                                       goto out;
+                               dissolve_compound_page(page,
+                                                      VMEMMAP_HPAGE_ORDER);
+
+                               to = page_to_virt(page);
+                               memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
+
+                               /*
+                                * Make sure that any data written to @to is
+                                * made visible to the physical page.
+                                */
+                               flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
+
+                               merge_huge_page_pmd_vmemmap(pmd++, addr, page,
+                                                           &remap_pages);
+                       }
+
+out:
+                       free_vmemmap_page_list(&remap_pages);
+                       return;
+               }
        }
        spin_unlock(ptl);
 }
-- 
2.20.1
