LKP reported a preemption issue with this patch; the commit log below is
updated and refreshed accordingly.

From f18e8c87a045bbb8040006b6816ded1f55fa6f9c Mon Sep 17 00:00:00 2001
From: Alex Shi <alex....@linux.alibaba.com>
Date: Sat, 25 Jul 2020 22:31:03 +0800
Subject: [PATCH] mm/mlock: remove lru_lock on TestClearPageMlocked in
 munlock_vma_page

In munlock_vma_page(), the comment says lru_lock is needed for
serialization with split_huge_page(). But the page must be PageLocked
here, and the split_huge_page() path likewise requires the page lock.
Thus PageLocked alone is enough to serialize the two.
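
For reference, a simplified sketch (illustration only, paraphrased and
not part of the patch): both sides already insist on the page lock,
which is what provides the serialization:

	/* mm/mlock.c: munlock_vma_page() runs with the page locked */
	unsigned int munlock_vma_page(struct page *page)
	{
		/* For try_to_munlock() and to serialize with page migration */
		BUG_ON(!PageLocked(page));
		...
	}

	/* mm/huge_memory.c: the split path demands the page lock as well */
	int split_huge_page_to_list(struct page *page, struct list_head *list)
	{
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		...
	}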

So TestClearPageMlocked()/thp_nr_pages() do not need to run under the
lru lock, and the lock can be dropped from munlock_vma_page().

The other munlock path, __munlock_pagevec(), has no PageLocked
protection and therefore keeps the lru lock.
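
As a sketch of that path (illustration only, not part of the patch):
pages in the pagevec are not page-locked, so the TestClearPageMlocked()
step has to stay under the lruvec lock:

	/* __munlock_pagevec(): no lock_page() here, keep the lru lock */
	lruvec = relock_page_lruvec_irq(page, lruvec);
	if (TestClearPageMlocked(page)) {
		/* clear Mlocked only while holding the lruvec lock */
		...
	}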

LKP found a preemption issue with __mod_zone_page_state(), which needs
to be changed to mod_zone_page_state(). Thanks!
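
For context (illustration only, not part of the patch):
__mod_zone_page_state() assumes preemption/interrupts are already
disabled, which the irq-disabled lru_lock used to guarantee; without
that lock the preempt-safe variant has to be used:

	/* before: safe only because lru_lock was held with IRQs off */
	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	/* after: mod_zone_page_state() handles preemption/IRQs itself */
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);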

Reported-by: kernel test robot <rong.a.c...@intel.com>
Signed-off-by: Alex Shi <alex....@linux.alibaba.com>
Cc: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: linux...@kvack.org
Cc: linux-kernel@vger.kernel.org
---
 mm/mlock.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)

diff --git a/mm/mlock.c b/mm/mlock.c
index 0448409184e3..cd88b93b0f0d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -69,9 +69,9 @@ void clear_page_mlock(struct page *page)
         *
         * See __pagevec_lru_add_fn for more explanation.
         */
-       if (!isolate_lru_page(page)) {
+       if (!isolate_lru_page(page))
                putback_lru_page(page);
-       } else {
+       else {
                /*
                 * We lost the race. the page already moved to evictable list.
                 */
@@ -178,7 +178,6 @@ static void __munlock_isolation_failed(struct page *page)
 unsigned int munlock_vma_page(struct page *page)
 {
        int nr_pages;
-       struct lruvec *lruvec;
 
        /* For try_to_munlock() and to serialize with page migration */
        BUG_ON(!PageLocked(page));
@@ -186,37 +185,22 @@ unsigned int munlock_vma_page(struct page *page)
        VM_BUG_ON_PAGE(PageTail(page), page);
 
        /*
-        * Serialize split tail pages in __split_huge_page_tail() which
-        * might otherwise copy PageMlocked to part of the tail pages before
-        * we clear it in the head page. It also stabilizes thp_nr_pages().
-        * TestClearPageLRU can't be used here to block page isolation, since
-        * out of lock clear_page_mlock may interfer PageLRU/PageMlocked
-        * sequence, same as __pagevec_lru_add_fn, and lead the page place to
-        * wrong lru list here. So relay on PageLocked to stop lruvec change
-        * in mem_cgroup_move_account().
+        * Serialize split tail pages in __split_huge_page_tail() by
+        * lock_page(); Do TestClearPageMlocked/PageLRU sequence like
+        * clear_page_mlock().
         */
-       lruvec = lock_page_lruvec_irq(page);
-
-       if (!TestClearPageMlocked(page)) {
+       if (!TestClearPageMlocked(page))
                /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
-               nr_pages = 1;
-               goto unlock_out;
-       }
+               return 0;
 
        nr_pages = thp_nr_pages(page);
-       __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+       mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
-       if (__munlock_isolate_lru_page(page, lruvec, true)) {
-               unlock_page_lruvec_irq(lruvec);
+       if (!isolate_lru_page(page))
                __munlock_isolated_page(page);
-               goto out;
-       }
-       __munlock_isolation_failed(page);
-
-unlock_out:
-       unlock_page_lruvec_irq(lruvec);
+       else
+               __munlock_isolation_failed(page);
 
-out:
        return nr_pages - 1;
 }
 
@@ -305,6 +289,11 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 
                /* block memcg change in mem_cgroup_move_account */
                lock_page_memcg(page);
+               /*
+                * Serialize split tail pages in __split_huge_page_tail() which
+                * might otherwise copy PageMlocked to part of the tail pages
+                * before we clear it in the head page.
+                */
                lruvec = relock_page_lruvec_irq(page, lruvec);
                if (TestClearPageMlocked(page)) {
                        /*
-- 
2.17.1
