Since update_and_free_page() may defer the actual freeing of the hugetlb
page to a work queue, flush that work when dissolving a free hugetlb page
so that the page is really freed to the buddy allocator by the time
dissolve_free_huge_page() returns.
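
For reference, a minimal sketch of the deferred-free pattern involved (the
example_* names are illustrative only; in this series the real work item is
hpage_update_work): a context that cannot sleep schedules the free via a
work item, and a caller that needs the page back in the buddy allocator
must flush_work() before relying on it.

	#include <linux/workqueue.h>

	static void example_free_workfn(struct work_struct *work)
	{
		/* ... actually free the page(s) back to the buddy allocator ... */
	}
	static DECLARE_WORK(example_free_work, example_free_workfn);

	static void example_deferred_free(void)
	{
		/* Called under a spinlock: cannot sleep here, so defer the free. */
		schedule_work(&example_free_work);
	}

	static void example_dissolve(void)
	{
		example_deferred_free();
		/* Wait until the deferred free has completed. */
		flush_work(&example_free_work);
	}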

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 mm/hugetlb.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7198bd9bdce5..509de0732d9f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1807,6 +1807,11 @@ static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
        free_gigantic_page(page, huge_page_order(h));
 }
 
+static inline void flush_free_huge_page_work(void)
+{
+       flush_work(&hpage_update_work);
+}
+
 static inline bool subpage_hwpoison(struct page *head, struct page *page)
 {
        return page_private(head + 4) == page - head;
@@ -1869,6 +1874,10 @@ static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
        spin_lock(&hugetlb_lock);
 }
 
+static inline void flush_free_huge_page_work(void)
+{
+}
+
 static inline bool subpage_hwpoison(struct page *head, struct page *page)
 {
        return true;
@@ -2443,6 +2452,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 int dissolve_free_huge_page(struct page *page)
 {
        int rc = -EBUSY;
+       bool need_flush = false;
 
        /* Not to disrupt normal path by vainly holding hugetlb_lock */
        if (!PageHuge(page))
@@ -2474,10 +2484,19 @@ int dissolve_free_huge_page(struct page *page)
                h->free_huge_pages_node[nid]--;
                h->max_huge_pages--;
                update_and_free_page(h, head);
+               need_flush = true;
                rc = 0;
        }
 out:
        spin_unlock(&hugetlb_lock);
+
+       /*
+        * We should flush work before return to make sure that
+        * the hugetlb page is freed to the buddy.
+        */
+       if (need_flush)
+               flush_free_huge_page_work();
+
        return rc;
 }
 
-- 
2.20.1
