From: Hugh Dickins <hu...@google.com>

Use the relock function to replace the open-coded relocking sequences, and
avoid a few unnecessary lock/unlock cycles along the way.

Signed-off-by: Hugh Dickins <hu...@google.com>
Signed-off-by: Alex Shi <alex....@linux.alibaba.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: Jann Horn <ja...@google.com>
Cc: Mel Gorman <mgor...@techsingularity.net>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Hugh Dickins <hu...@google.com>
Cc: cgro...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux...@kvack.org
---
 mm/vmscan.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bdb53a678e7e..078a1640ec60 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1854,15 +1854,15 @@ static unsigned noinline_for_stack 
move_pages_to_lru(struct lruvec *lruvec,
        enum lru_list lru;
 
        while (!list_empty(list)) {
-               struct lruvec *new_lruvec = NULL;
-
                page = lru_to_page(list);
                VM_BUG_ON_PAGE(PageLRU(page), page);
                list_del(&page->lru);
                if (unlikely(!page_evictable(page))) {
-                       spin_unlock_irq(&lruvec->lru_lock);
+                       if (lruvec) {
+                               spin_unlock_irq(&lruvec->lru_lock);
+                               lruvec = NULL;
+                       }
                        putback_lru_page(page);
-                       spin_lock_irq(&lruvec->lru_lock);
                        continue;
                }
 
@@ -1876,12 +1876,7 @@ static unsigned noinline_for_stack 
move_pages_to_lru(struct lruvec *lruvec,
                 *                                        list_add(&page->lru,)
                 *     list_add(&page->lru,) //corrupt
                 */
-               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-               if (new_lruvec != lruvec) {
-                       if (lruvec)
-                               spin_unlock_irq(&lruvec->lru_lock);
-                       lruvec = lock_page_lruvec_irq(page);
-               }
+               lruvec = relock_page_lruvec_irq(page, lruvec);
                SetPageLRU(page);
 
                if (unlikely(put_page_testzero(page))) {
@@ -1890,8 +1885,8 @@ static unsigned noinline_for_stack 
move_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&lruvec->lru_lock);
+                               lruvec = NULL;
                                destroy_compound_page(page);
-                               spin_lock_irq(&lruvec->lru_lock);
                        } else
                                list_add(&page->lru, &pages_to_free);
 
-- 
1.8.3.1

Reply via email to