Demote page functionality splits a huge page into a number of huge
pages of a smaller size.  For example, on x86 a 1GB huge page can be
demoted into 512 2MB huge pages.  Demotion is done 'in place' by
simply splitting the huge page.
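
As a quick sanity check on the arithmetic, here is a standalone
userspace sketch (not kernel code; the orders assume x86 with 4K base
pages):

	#include <stdio.h>

	int main(void)
	{
		unsigned int src_order = 18;	/* 1GB huge page */
		unsigned int dst_order = 9;	/* 2MB huge page */

		/* an in-place demote yields 2^(src - dst) smaller pages */
		printf("%lu\n", 1UL << (src_order - dst_order));	/* 512 */
		return 0;
	}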

Signed-off-by: Mike Kravetz <mike.krav...@oracle.com>
---
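Not part of the patch proper: a hedged sketch of how demote might be
driven from userspace via the per-size sysfs file added elsewhere in
this series.  The path below is an assumption based on the existing
hugepages sysfs layout, not something this patch adds.

	#include <stdio.h>

	int main(void)
	{
		/* assumed sysfs path; added by a separate patch */
		FILE *f = fopen("/sys/kernel/mm/hugepages/"
				"hugepages-1048576kB/demote", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "1\n");	/* demote one 1GB huge page */
		fclose(f);
		return 0;
	}
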
 mm/hugetlb.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 70 insertions(+), 3 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0c29b7d0d469..33b0d8778551 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2744,17 +2744,84 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
        return 0;
 }
 
+static int demote_free_huge_page(struct hstate *h, struct page *page)
+{
+       int i, nid = page_to_nid(page);
+       struct hstate *target_hstate;
+       bool cma_page = HPageCma(page);
+
+       target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
+
+       ClearHPageFreed(page);
+       list_del(&page->lru);
+       h->free_huge_pages--;
+       h->free_huge_pages_node[nid]--;
+       h->nr_huge_pages--;
+       h->nr_huge_pages_node[nid]--;
+       set_page_refcounted(page);
+       set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
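+
+       /*
+        * The page is now off the free lists and uncounted, so it cannot
+        * be allocated or freed concurrently.  Drop hugetlb_lock across
+        * the tear-down and rebuild of the compound page below.
+        */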
+       spin_unlock(&hugetlb_lock);
+
+       /*
+        * Note for future:
+        * When support for reducing vmemmap of huge pages is added, we
+        * will need to allocate vmemmap pages here and could fail.
+        */
+       destroy_compound_gigantic_page(page, huge_page_order(h));
+
+       for (i = 0; i < pages_per_huge_page(h);
+                               i += pages_per_huge_page(target_hstate)) {
+               /*
+                * Explicitly use prep_compound_gigantic_page if demoting
+                * a page allocated from CMA so that TAIL_MAPPING is not
+                * set in tail pages.  The CMA code does not expect this.
+                */
+               if (cma_page)
+                       prep_compound_gigantic_page(page + i,
+                                                       target_hstate->order);
+               else
+                       prep_compound_huge_page(page + i, target_hstate->order);
+               prep_new_huge_page(target_hstate, page + i, nid);
+               if (cma_page)
+                       SetHPageCma(page + i);
+               put_page(page + i);
+       }
+
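+       /* Re-take hugetlb_lock dropped above; the caller expects it held. */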
+       spin_lock(&hugetlb_lock);
+       return 0;
+}
+
 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
+       int nr_nodes, node;
+       struct page *page;
        int rc = 0;
 
        /* If no demote order, free to buddy */
        if (!h->demote_order)
                return free_pool_huge_page(h, nodes_allowed, 0);
 
-       /*
-        * TODO - demote fucntionality will be added in subsequent patch
-        */
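+       /*
+        * Demote a single free page from the first node in the mask with
+        * a free huge page of this size.  demote_free_huge_page returns
+        * 0 on success, so invert its result to give the caller a count.
+        */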
+       for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+               if (!list_empty(&h->hugepage_freelists[node])) {
+                       page = list_entry(h->hugepage_freelists[node].next,
+                                       struct page, lru);
+                       rc = !demote_free_huge_page(h, page);
+                       break;
+               }
+       }
+
        return rc;
 }
 
-- 
2.29.2
