From: Roman Gushchin <g...@fb.com>

Replace the blocking cma_release() call with a non-blocking
cma_release_nowait() call, so there is no longer any need to
temporarily drop hugetlb_lock in update_and_free_page().
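
For reference, cma_release_nowait() is assumed here to keep the
cma_release() signature and return-value convention (false when the
page was not allocated from the given CMA area, as relied on in the
first hunk below), while deferring the actual range release so the
call never sleeps. A minimal sketch of that assumed contract:

	/*
	 * Sketch of the assumed interface, not the actual
	 * implementation: behaves like cma_release(), but never
	 * sleeps, so it is safe to call with spinlocks such as
	 * hugetlb_lock held. Returns false if @pages was not
	 * allocated from @cma.
	 */
	bool cma_release_nowait(struct cma *cma, const struct page *pages,
				unsigned int count);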

Signed-off-by: Roman Gushchin <g...@fb.com>
Signed-off-by: Mike Kravetz <mike.krav...@oracle.com>
---
 mm/hugetlb.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 408dbc08298a..f9ba63fc1747 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1258,10 +1258,11 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 {
        /*
         * If the page isn't allocated using the cma allocator,
-        * cma_release() returns false.
+        * cma_release_nowait() returns false.
         */
 #ifdef CONFIG_CMA
-       if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+       if (cma_release_nowait(hugetlb_cma[page_to_nid(page)], page,
+                              1 << order))
                return;
 #endif
 
@@ -1348,14 +1349,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
-               /*
-                * Temporarily drop the hugetlb_lock, because
-                * we might block in free_gigantic_page().
-                */
-               spin_unlock(&hugetlb_lock);
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
-               spin_lock(&hugetlb_lock);
        } else {
                __free_pages(page, huge_page_order(h));
        }
-- 
2.30.2
