-int isolate_or_dissolve_huge_page(struct page *page)
+int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
  {
        struct hstate *h;
        struct page *head;
+       int ret = -EBUSY;
        /*
         * The page might have been dissolved from under our feet, so make sure
@@ -2373,13 +2380,18 @@ int isolate_or_dissolve_huge_page(struct page *page)
        /*
         * Fence off gigantic pages as there is a cyclic dependency between
-        * alloc_contig_range and them. Return -ENOME as this has the effect
+        * alloc_contig_range and them. Return -ENOMEM as this has the effect

Nit: this belongs in the previous patch.

         * of bailing out right away without further retrying.
         */
        if (hstate_is_gigantic(h))
                return -ENOMEM;
-       return alloc_and_dissolve_huge_page(h, head);
+       if (page_count(head) && isolate_huge_page(head, list))
+               ret = 0;
+       else if (!page_count(head))
+               ret = alloc_and_dissolve_huge_page(h, head, list);
+
+       return ret;
  }
  struct page *alloc_huge_page(struct vm_area_struct *vma,
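
Just to spell out the intended usage for other readers, a caller would do
something like the following (untested sketch, the variable names are made
up here and not taken from this series):

        LIST_HEAD(isolated);
        int ret = isolate_or_dissolve_huge_page(page, &isolated);

        if (ret)
                return ret;
        /*
         * If the hugepage was in use, it is now isolated on our list:
         * migrate it or put it back. putback_movable_pages() already
         * handles hugepages via putback_active_hugepage().
         */
        if (!list_empty(&isolated))
                putback_movable_pages(&isolated);

I.e., an in-use hugepage is handed back on the list for migration, while a
free one is dissolved and replaced in place.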
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bb8321026c0c..5199b9696bab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1703,8 +1703,9 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
        LIST_HEAD(clean_pages);
        list_for_each_entry_safe(page, next, page_list, lru) {
-               if (page_is_file_lru(page) && !PageDirty(page) &&
-                   !__PageMovable(page) && !PageUnevictable(page)) {
+               if (!PageHuge(page) && page_is_file_lru(page) &&
+                   !PageDirty(page) && !__PageMovable(page) &&
+                   !PageUnevictable(page)) {

Nit: adding it at the end of the condition list would require fewer modifications ;) See the sketch below the hunk.

                        ClearPageActive(page);
                        list_move(&page->lru, &clean_pages);
                }
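
I.e., something like (untested):

                if (page_is_file_lru(page) && !PageDirty(page) &&
                    !__PageMovable(page) && !PageUnevictable(page) &&
                    !PageHuge(page)) {

That way the two existing condition lines stay untouched and the diff boils
down to a single added check.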


Acked-by: David Hildenbrand <da...@redhat.com>

--
Thanks,

David / dhildenb
