When we allocate a HugeTLB page from the buddy allocator, we should
free the unused vmemmap pages associated with it. We do that in
prep_new_huge_page().
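
For a sense of the savings, here is a minimal userspace sketch (not
kernel code; it assumes 4 KiB base pages, 2 MiB HugeTLB pages and a
64-byte struct page):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hpage_size = 2UL << 20;  /* assumed HugeTLB page size */
		unsigned long base_page_size = 4096;   /* assumed base page size */
		unsigned long struct_page_size = 64;   /* assumed sizeof(struct page) */
		unsigned long reserve = 2;             /* RESERVE_VMEMMAP_NR */
		unsigned long nr_struct_pages = hpage_size / base_page_size;
		unsigned long vmemmap_pages = nr_struct_pages * struct_page_size /
					      base_page_size;

		printf("vmemmap pages per hugepage: %lu, freeable: %lu\n",
		       vmemmap_pages, vmemmap_pages - reserve);
		return 0;
	}

Under those assumptions, each 2 MiB hugepage is described by 8 vmemmap
pages, of which RESERVE_VMEMMAP_NR (2) are kept and 6 can be freed.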

Signed-off-by: Muchun Song <[email protected]>
---
 include/linux/hugetlb.h |  21 ++++
 mm/hugetlb.c            | 231 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 252 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ace304a6196c..2561af2ad901 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -601,6 +601,13 @@ static inline bool arch_vmemmap_support_huge_mapping(void)
 }
 #endif
 
+#ifndef vmemmap_pmd_huge
+static inline bool vmemmap_pmd_huge(pmd_t *pmd)
+{
+       return pmd_huge(*pmd);
+}
+#endif
+
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT            PMD_SHIFT
 #endif
@@ -790,6 +797,15 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 }
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+int handle_vmemmap_fault(unsigned long page);
+#else
+static inline int handle_vmemmap_fault(unsigned long page)
+{
+       return -EFAULT;
+}
+#endif
+
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
@@ -943,6 +959,11 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
                                        pte_t *ptep, pte_t pte, unsigned long sz)
 {
 }
+
+static inline int handle_vmemmap_fault(unsigned long page)
+{
+       return -EFAULT;
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 static inline spinlock_t *huge_pte_lock(struct hstate *h,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d6ae9b6876be..a628588a075a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1293,10 +1293,20 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#include <linux/bootmem_info.h>
+
 #define RESERVE_VMEMMAP_NR     2U
+#define RESERVE_VMEMMAP_SIZE   (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
 #define page_huge_pte(page)    ((page)->pmd_huge_pte)
 
+#define vmemmap_hpage_addr_end(addr, end)                              \
+({                                                                     \
+       unsigned long __boundary;                                       \
+       __boundary = ((addr) + VMEMMAP_HPAGE_SIZE) & VMEMMAP_HPAGE_MASK;\
+       (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
+})
+
 static inline unsigned int nr_free_vmemmap(struct hstate *h)
 {
        return h->nr_free_vmemmap_pages;
@@ -1416,6 +1426,222 @@ static void __init hugetlb_vmemmap_init(struct hstate *h)
        pr_info("HugeTLB: can free %d vmemmap pages for %s\n",
                h->nr_free_vmemmap_pages, h->name);
 }
+
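+/*
+ * A single global lock currently serializes all updates to the vmemmap
+ * page tables below (pmd splits and pte remaps).
+ */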
+static inline spinlock_t *vmemmap_pmd_lockptr(pmd_t *pmd)
+{
+       static DEFINE_SPINLOCK(pgtable_lock);
+
+       return &pgtable_lock;
+}
+
+/*
+ * Walk a vmemmap address to the pmd it maps.
+ */
+static pmd_t *vmemmap_to_pmd(const void *page)
+{
+       unsigned long addr = (unsigned long)page;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       if (addr < VMEMMAP_START || addr >= VMEMMAP_END)
+               return NULL;
+
+       pgd = pgd_offset_k(addr);
+       if (pgd_none(*pgd))
+               return NULL;
+       p4d = p4d_offset(pgd, addr);
+       if (p4d_none(*p4d))
+               return NULL;
+       pud = pud_offset(p4d, addr);
+
+       WARN_ON_ONCE(pud_bad(*pud));
+       if (pud_none(*pud) || pud_bad(*pud))
+               return NULL;
+       pmd = pmd_offset(pud, addr);
+
+       return pmd;
+}
+
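+/*
+ * The _mapcount of a vmemmap page table page is reused to count the
+ * hugepages under this pmd whose vmemmap pages have been freed.
+ * _mapcount starts at -1, hence the "+ 1" below.
+ */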
+static inline int freed_vmemmap_hpage(struct page *page)
+{
+       return atomic_read(&page->_mapcount) + 1;
+}
+
+static inline int freed_vmemmap_hpage_inc(struct page *page)
+{
+       return atomic_inc_return_relaxed(&page->_mapcount) + 1;
+}
+
+static inline int freed_vmemmap_hpage_dec(struct page *page)
+{
+       return atomic_dec_return_relaxed(&page->_mapcount) + 1;
+}
+
+static inline void free_vmemmap_page_list(struct list_head *list)
+{
+       struct page *page, *next;
+
+       list_for_each_entry_safe(page, next, list, lru) {
+               list_del(&page->lru);
+               free_vmemmap_page(page);
+       }
+}
+
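+/*
+ * Remap @nr_free ptes starting at @start to the @reuse page and queue
+ * the old vmemmap pages on @free_pages for later freeing.
+ */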
+static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+                                        unsigned long start,
+                                        unsigned int nr_free,
+                                        struct list_head *free_pages)
+{
+       pte_t entry = mk_pte(reuse, PAGE_KERNEL);
+       unsigned long addr;
+       unsigned long end = start + (nr_free << PAGE_SHIFT);
+
+       for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
+               struct page *page;
+               pte_t old = *ptep;
+
+               VM_WARN_ON(!pte_present(old));
+               page = pte_page(old);
+               list_add(&page->lru, free_pages);
+
+               set_pte_at(&init_mm, addr, ptep, entry);
+       }
+}
+
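+/*
+ * Walk the vmemmap ptes covering the hugepage's struct pages, skip the
+ * RESERVE_VMEMMAP_NR head pages, remap the rest to the reuse page (the
+ * last reserved page) and flush the TLB for the range.
+ */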
+static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+                                        unsigned long addr,
+                                        struct list_head *free_pages)
+{
+       unsigned long next;
+       unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+       unsigned long end = addr + nr_vmemmap_size(h);
+       struct page *reuse = NULL;
+
+       addr = start;
+       do {
+               unsigned int nr_pages;
+               pte_t *ptep;
+
+               ptep = pte_offset_kernel(pmd, addr);
+               if (!reuse)
+                       reuse = pte_page(ptep[-1]);
+
+               next = vmemmap_hpage_addr_end(addr, end);
+               nr_pages = (next - addr) >> PAGE_SHIFT;
+               __free_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
+                                            free_pages);
+       } while (pmd++, addr = next, addr != end);
+
+       flush_tlb_kernel_range(start, end);
+}
+
+static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
+{
+       struct mm_struct *mm = &init_mm;
+       struct page *page;
+       pmd_t old_pmd, _pmd;
+       int i;
+
+       /*
+        * Up to this point the pmd is present and huge and userland has
+        * full access to the hugepage during the split (which happens in
+        * place). If we overwrite the pmd with the not-huge version pointing
+        * to the pte here (which of course we could if all CPUs were bug
+        * free), userland could trigger a small page size TLB miss on the
+        * small sized TLB while the hugepage TLB entry is still established
+        * in the huge TLB. Some CPUs don't like that.
+        *
+        * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
+        * 383 on page 93. Intel should be safe but also warns that it's
+        * only safe if the permission and cache attributes of the two entries
+        * loaded in the two TLBs are identical (which should be the case
+        * here).
+        *
+        * So it is generally safer to never allow small and huge TLB entries
+        * for the same virtual address to be loaded simultaneously. But here
+        * we cannot make the pmd non-present first and then flush the TLB,
+        * because doing so (which may trigger an IPI to other CPUs to flush
+        * the TLB) can deadlock. So we have to break the above rule. Be
+        * careful: we assume here that all CPUs are bug free; otherwise, the
+        * feature of freeing unused vmemmap pages should not be enabled on
+        * buggy CPUs.
+        *
+        * Why can we not make the pmd non-present first? Suppose CPU1 holds
+        * the vmemmap pgtable spinlock and makes the pmd non-present. If
+        * CPU0 accesses a struct page with irqs disabled while the vmemmap
+        * pgtable lock is held by CPU1, CPU0 cannot handle the IPI interrupt
+        * to flush the TLB because irqs are disabled, and we deadlock. In
+        * order to avoid this issue, we do not make the pmd non-present.
+        *
+        * The deadlock scene is shown below.
+        *
+        *     CPU0:                                        CPU1:
+        * disable irqs                           hold the vmemmap pgtable lock
+        *                                        set pmd non-present
+        * read/write `struct page`(page fault)
+        * jump to handle_vmemmap_fault
+        * spin for vmemmap pgtable lock
+        *                                        flush_tlb (send IPI to CPU0)
+        *                                        set new pmd(small page)
+        */
+       old_pmd = READ_ONCE(*pmd);
+       page = pmd_page(old_pmd);
+       pmd_populate_kernel(mm, &_pmd, pte_p);
+
+       for (i = 0; i < VMEMMAP_HPAGE_NR; i++, addr += PAGE_SIZE) {
+               pte_t entry, *pte;
+
+               entry = mk_pte(page + i, PAGE_KERNEL);
+               pte = pte_offset_kernel(&_pmd, addr);
+               VM_BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, addr, pte, entry);
+       }
+
+       /* make pte visible before pmd */
+       smp_wmb();
+       pmd_populate_kernel(mm, pmd, pte_p);
+}
+
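+/*
+ * Split every huge pmd mapping the hugepage's vmemmap into a pte table,
+ * using the page tables stashed on @head at allocation time, then flush
+ * the TLB for the whole range.
+ */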
+static void split_vmemmap_huge_page(struct page *head, pmd_t *pmd)
+{
+       pte_t *pte_p;
+       unsigned long start = (unsigned long)head & VMEMMAP_HPAGE_MASK;
+       unsigned long addr = start;
+
+       while ((pte_p = vmemmap_pgtable_withdraw(head))) {
+               VM_BUG_ON(freed_vmemmap_hpage(virt_to_page(pte_p)));
+               split_vmemmap_pmd(pmd++, pte_p, addr);
+               addr += VMEMMAP_HPAGE_SIZE;
+       }
+
+       flush_tlb_kernel_range(start, addr);
+}
+
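+/*
+ * Free the unused vmemmap pages of a newly allocated hugepage: split
+ * the huge pmd mapping the vmemmap if necessary, remap the freeable
+ * tail vmemmap pages to the reuse page, and release the old pages via
+ * free_vmemmap_page_list().
+ */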
+static void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+       pmd_t *pmd;
+       spinlock_t *ptl;
+       LIST_HEAD(free_pages);
+
+       if (!nr_free_vmemmap(h))
+               return;
+
+       pmd = vmemmap_to_pmd(head);
+       BUG_ON(!pmd);
+       ptl = vmemmap_pmd_lockptr(pmd);
+
+       spin_lock(ptl);
+       if (vmemmap_pmd_huge(pmd)) {
+               VM_BUG_ON(!nr_pgtable(h));
+               split_vmemmap_huge_page(head, pmd);
+       }
+
+       __free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+       freed_vmemmap_hpage_inc(pmd_page(*pmd));
+       spin_unlock(ptl);
+
+       free_vmemmap_page_list(&free_pages);
+}
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
@@ -1429,6 +1655,10 @@ static inline int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page)
 static inline void vmemmap_pgtable_free(struct hstate *h, struct page *page)
 {
 }
+
+static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
 #endif
 
 static void update_and_free_page(struct hstate *h, struct page *page)
@@ -1637,6 +1867,7 @@ void free_huge_page(struct page *page)
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
+       free_huge_page_vmemmap(h, page);
        /* Must be called before the initialization of @page->lru */
        vmemmap_pgtable_free(h, page);
 
-- 
2.20.1
