lru_cache_add_active_or_unevictable() already adds new ksm pages to
the active lru, so the unconditional activate_page() at the end of
unuse_pte() is redundant for them. Move the call into the swapcache
branch, the only case that still needs it.

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 mm/swapfile.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
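
A note for reviewers: the claim above is easy to check against the
helper itself. Below is a simplified sketch of
lru_cache_add_active_or_unevictable() as it appears in mm/swap.c
around this kernel version (trimmed for readability, not the verbatim
source). Every new page in a non-mlocked vma gets SetPageActive()
before it is placed on the LRU, which is the same end state the
dropped activate_page() call would have produced:

void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		/* Common case: a brand-new page starts out active. */
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/* mlocked vma: account the page as unevictable instead. */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}

The swapcache branch, by contrast, reinserts an existing page whose
LRU state is unknown, so activate_page() is moved there rather than
dropped entirely.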

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c26916e95fd..cf115ea26a20 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1913,16 +1913,16 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        if (page == swapcache) {
                page_add_anon_rmap(page, vma, addr, false);
+               /*
+                * Move the page to the active list so it is not
+                * immediately swapped out again after swapon.
+                */
+               activate_page(page);
        } else { /* ksm created a completely new copy */
                page_add_new_anon_rmap(page, vma, addr, false);
                lru_cache_add_active_or_unevictable(page, vma);
        }
        swap_free(entry);
-       /*
-        * Move the page to the active list so it is not
-        * immediately swapped out again after swapon.
-        */
-       activate_page(page);
 out:
        pte_unmap_unlock(pte, ptl);
        if (page != swapcache) {
-- 
2.28.0.236.gb10cc79966-goog
