Currently, prep_new_huge_page() performs two tasks.
It sets the right state for a new hugetlb page, and increases the
hstate's counters to account for the new page.

Let us split its functionality into two separate functions, decoupling
the handling of the counters from the initialization of the page.
The outcome is __prep_new_huge_page(), which only initializes the
page, and __prep_account_new_huge_page(), which adds the new page to
the hstate's counters.

This allows us to set up a hugetlb page without having to worry about
the counters or the locking. It will prove useful in the next patch.
prep_new_huge_page() still calls both functions.
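
To illustrate the intended use, a hypothetical caller could initialize
the page without the lock and only take hugetlb_lock around the
accounting step (a sketch only; example_add_fresh_page() is a made-up
name and not part of this patch):

	/* Sketch: initialize outside the lock, account under it. */
	static void example_add_fresh_page(struct hstate *h,
					   struct page *page, int nid)
	{
		/* Page state setup does not need hugetlb_lock. */
		__prep_new_huge_page(page);

		/* Updating the hstate counters does. */
		spin_lock_irq(&hugetlb_lock);
		__prep_account_new_huge_page(h, nid);
		spin_unlock_irq(&hugetlb_lock);
	}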

Signed-off-by: Oscar Salvador <osalva...@suse.de>
---
 mm/hugetlb.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e40d5fe5c63c..0607b2b71ac6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1483,7 +1483,16 @@ void free_huge_page(struct page *page)
        }
 }
 
-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+/*
+ * Must be called with the hugetlb lock held
+ */
+static void __prep_account_new_huge_page(struct hstate *h, int nid)
+{
+       h->nr_huge_pages++;
+       h->nr_huge_pages_node[nid]++;
+}
+
+static void __prep_new_huge_page(struct page *page)
 {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
@@ -1491,9 +1500,13 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
        ClearHPageFreed(page);
+}
+
+static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+{
+       __prep_new_huge_page(page);
        spin_lock_irq(&hugetlb_lock);
-       h->nr_huge_pages++;
-       h->nr_huge_pages_node[nid]++;
+       __prep_account_new_huge_page(h, nid);
        spin_unlock_irq(&hugetlb_lock);
 }
 
-- 
2.16.3
