The __free_huge_page_pmd_vmemmap() and __remap_huge_page_pmd_vmemmap()
functions are almost identical, so introduce a common helper,
remap_huge_page_pmd_vmemmap(), which takes the per-PTE operation as a
callback and removes the duplication.

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
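A note for reviewers (below the "---" so git-am drops it): this is the
usual "shared walker + per-PTE callback" deduplication, with the per-PTE
operation passed in via the new remap_pte_fn pointer. Note that the merged
walker now performs flush_cache_vunmap() and set_page_private() on the
free path as well, which the removed __free_huge_page_pmd_vmemmap() did
not. A minimal, self-contained sketch of the same pattern, using
hypothetical user-space names rather than the kernel code:

	#include <stdio.h>

	/*
	 * The operation applied to each item is passed as a callback,
	 * the same way remap_pte_fn parameterizes the shared PMD walker.
	 */
	typedef void (*visit_fn)(int item);

	static void visit_print(int item)
	{
		printf("%d\n", item);
	}

	static void visit_square(int item)
	{
		printf("%d\n", item * item);
	}

	/* One shared walker replaces two nearly identical loops. */
	static void walk(const int *items, int n, visit_fn fn)
	{
		int i;

		for (i = 0; i < n; i++)
			fn(items[i]);
	}

	int main(void)
	{
		int v[] = { 1, 2, 3 };

		walk(v, 3, visit_print);	/* stands in for the free path */
		walk(v, 3, visit_square);	/* stands in for the remap path */
		return 0;
	}

Keeping the cache flush, the reuse-page bookkeeping, and the TLB flush in
a single walker means the free and remap paths can no longer drift apart.
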
 mm/hugetlb.c | 98 +++++++++++++++++++++-------------------------------
 1 file changed, 39 insertions(+), 59 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cea580058a16..bd0c4e7fd994 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1482,6 +1482,41 @@ static inline int freed_vmemmap_hpage_dec(struct page *page)
        return atomic_dec_return_relaxed(&page->_mapcount) + 1;
 }
 
+typedef void (*remap_pte_fn)(struct page *reuse, pte_t *ptep,
+                            unsigned long start, unsigned int nr_pages,
+                            struct list_head *pages);
+
+static void remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+                                       unsigned long addr,
+                                       struct list_head *pages,
+                                       remap_pte_fn remap_fn)
+{
+       unsigned long next;
+       unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+       unsigned long end = addr + nr_vmemmap_size(h);
+       struct page *reuse = NULL;
+
+       flush_cache_vunmap(start, end);
+
+       addr = start;
+       do {
+               unsigned int nr_pages;
+               pte_t *ptep;
+
+               ptep = pte_offset_kernel(pmd, addr);
+               if (!reuse) {
+                       reuse = pte_page(ptep[-1]);
+                       set_page_private(reuse, addr - PAGE_SIZE);
+               }
+
+               next = vmemmap_hpage_addr_end(addr, end);
+               nr_pages = (next - addr) >> PAGE_SHIFT;
+               remap_fn(reuse, ptep, addr, nr_pages, pages);
+       } while (pmd++, addr = next, addr != end);
+
+       flush_tlb_kernel_range(start, end);
+}
+
 static inline void free_vmemmap_page_list(struct list_head *list)
 {
        struct page *page, *next;
@@ -1513,33 +1548,6 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
        }
 }
 
-static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-                                        unsigned long addr,
-                                        struct list_head *free_pages)
-{
-       unsigned long next;
-       unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
-       unsigned long end = addr + nr_vmemmap_size(h);
-       struct page *reuse = NULL;
-
-       addr = start;
-       do {
-               unsigned int nr_pages;
-               pte_t *ptep;
-
-               ptep = pte_offset_kernel(pmd, addr);
-               if (!reuse)
-                       reuse = pte_page(ptep[-1]);
-
-               next = vmemmap_hpage_addr_end(addr, end);
-               nr_pages = (next - addr) >> PAGE_SHIFT;
-               __free_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
-                                            free_pages);
-       } while (pmd++, addr = next, addr != end);
-
-       flush_tlb_kernel_range(start, end);
-}
-
 static void split_vmemmap_pmd(pmd_t *pmd, pte_t *pte_p, unsigned long addr)
 {
        struct mm_struct *mm = &init_mm;
@@ -1598,7 +1606,8 @@ static void free_huge_page_vmemmap(struct hstate *h, struct page *head)
                split_vmemmap_huge_page(head, pmd);
        }
 
-       __free_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages);
+       remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &free_pages,
+                                   __free_huge_page_pte_vmemmap);
        freed_vmemmap_hpage_inc(pmd_page(*pmd));
        spin_unlock(ptl);
 
@@ -1638,35 +1647,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
        }
 }
 
-static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
-                                         unsigned long addr,
-                                         struct list_head *remap_pages)
-{
-       unsigned long next;
-       unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
-       unsigned long end = addr + nr_vmemmap_size(h);
-       struct page *reuse = NULL;
-
-       addr = start;
-       do {
-               unsigned int nr_pages;
-               pte_t *ptep;
-
-               ptep = pte_offset_kernel(pmd, addr);
-               if (!reuse) {
-                       reuse = pte_page(ptep[-1]);
-                       set_page_private(reuse, addr - PAGE_SIZE);
-               }
-
-               next = vmemmap_hpage_addr_end(addr, end);
-               nr_pages = (next - addr) >> PAGE_SHIFT;
-               __remap_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
-                                             remap_pages);
-       } while (pmd++, addr = next, addr != end);
-
-       flush_tlb_kernel_range(start, end);
-}
-
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
        int i;
@@ -1695,8 +1675,8 @@ static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
        ptl = vmemmap_pmd_lockptr(pmd);
 
        spin_lock(ptl);
-       __remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
-                                     &remap_pages);
+       remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head, &remap_pages,
+                                   __remap_huge_page_pte_vmemmap);
        if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
                /*
                 * Todo:
-- 
2.20.1
