If we have a lot of pages in queue to be split, deferred_split_scan()
can spend an unreasonable amount of time under the spinlock with
interrupts disabled.

Let's cap the number of pages to split per scan at sc->nr_to_scan.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Reported-by: Andrea Arcangeli <aarca...@redhat.com>
---
 mm/huge_memory.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 36f98459f854..298dbc001b07 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3478,17 +3478,19 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
        int split = 0;
 
        spin_lock_irqsave(&pgdata->split_queue_lock, flags);
-       list_splice_init(&pgdata->split_queue, &list);
-
        /* Take pin on all head pages to avoid freeing them under us */
-       list_for_each_safe(pos, next, &list) {
+       list_for_each_safe(pos, next, &pgdata->split_queue) {
                page = list_entry((void *)pos, struct page, mapping);
                page = compound_head(page);
-               /* race with put_compound_page() */
-               if (!get_page_unless_zero(page)) {
+               if (get_page_unless_zero(page)) {
+                       list_move(page_deferred_list(page), &list);
+               } else {
+                       /* We lost race with put_compound_page() */
                        list_del_init(page_deferred_list(page));
                        pgdata->split_queue_len--;
                }
+               if (!--sc->nr_to_scan)
+                       break;
        }
        spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
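For illustration, below is a minimal, self-contained user-space sketch of
the same pattern, with a pthread mutex standing in for the kernel spinlock;
the names (work_queue, work_item, process_item, scan_some) are made up for
the example, not kernel APIs. As in the patch, at most nr_to_scan items are
detached from the shared queue while the lock is held, and the expensive
work runs with the lock dropped:

/* Illustrative only: not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	int payload;
};

static struct work_item *work_queue;	/* shared, protected by queue_lock */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for split_huge_page(): the expensive part, done unlocked. */
static void process_item(struct work_item *item)
{
	printf("processing item %d\n", item->payload);
}

/* Detach and process at most nr_to_scan items; returns how many. */
static unsigned long scan_some(unsigned long nr_to_scan)
{
	struct work_item *batch = NULL, *item;
	unsigned long done = 0;

	/* Lock hold time is now O(nr_to_scan), not O(queue length). */
	pthread_mutex_lock(&queue_lock);
	while (work_queue && nr_to_scan--) {
		item = work_queue;
		work_queue = item->next;
		item->next = batch;
		batch = item;
	}
	pthread_mutex_unlock(&queue_lock);

	/* The expensive work runs without the lock held. */
	while (batch) {
		item = batch;
		batch = batch->next;
		process_item(item);
		free(item);
		done++;
	}
	return done;
}

int main(void)
{
	/* Queue up ten items, then drain them in capped batches of four. */
	for (int i = 0; i < 10; i++) {
		struct work_item *item = malloc(sizeof(*item));

		item->payload = i;
		item->next = work_queue;
		work_queue = item;
	}
	while (scan_some(4))
		;
	return 0;
}

The effect is the same as in deferred_split_scan(): lock hold time is
bounded by nr_to_scan rather than by the queue length, so a long queue can
no longer keep interrupts disabled for an arbitrary time.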
-- 
2.7.0.rc3
