[Devel] [PATCH rh8] mm/swap: activate swapped in pages on fault

2020-10-19 Thread Konstantin Khorenko
From: Andrey Ryabinin 

Move swapped-in anon pages directly to the active list. This
should help prevent anon thrashing: recently swapped-in pages
then have a better chance of staying in memory.
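
As a condensed illustration of the mechanism (a sketch only: the
helper name below is made up, the patch open-codes this inside
__read_swap_cache_async(), see the mm/swap_state.c hunk):

	/*
	 * Hypothetical helper, for illustration only: how the new
	 * 'activate' flag steers LRU placement of a freshly
	 * swapped-in page.
	 */
	static void swapin_add_to_lru(struct page *page, bool activate)
	{
		SetPageWorkingset(page);
		if (activate)
			SetPageActive(page);	/* start on the active LRU */
		lru_cache_add(page);		/* was lru_cache_add_anon() */
	}

Only the page actually being faulted on is activated; speculative
readahead pages still start on the inactive list (see the
"offset == entry_offset" argument passed down in
swap_cluster_readahead() below).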

https://pmc.acronis.com/browse/VSTOR-20859
Signed-off-by: Andrey Ryabinin 
[VvS RHEL7.8 rebase] context changes

(cherry picked from vz7 commit 134cd9b20a914080539e6310f76fe3f7b32bc710)
Signed-off-by: Konstantin Khorenko 
---
 include/linux/swap.h |  4 ++--
 mm/madvise.c         |  4 ++--
 mm/swap_state.c      | 21 +++++++++++++--------
 mm/swapfile.c        |  3 ++-
 mm/zswap.c           |  2 +-
 5 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index ee2145ab2ca1..c87b4f3b7acc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -424,10 +424,10 @@ extern struct page *lookup_swap_cache(swp_entry_t entry,
 				       unsigned long addr);
 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool do_poll);
+			bool do_poll, bool activate);
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated);
+			bool *new_page_allocated, bool activate);
 extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 			struct vm_fault *vmf);
 extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
diff --git a/mm/madvise.c b/mm/madvise.c
index 4f76df2dbfb5..953d4238b31c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -217,7 +217,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 			continue;
 
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-						vma, index, false);
+						vma, index, false, false);
 		if (page)
 			put_page(page);
 	}
@@ -258,7 +258,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		}
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-						NULL, 0, false);
+						NULL, 0, false, false);
 		if (page)
 			put_page(page);
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c64f5f088d01..e64dcca405f7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -377,7 +377,7 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
 
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated)
+			bool *new_page_allocated, bool activate)
 {
 	struct page *found_page = NULL, *new_page = NULL;
 	struct swap_info_struct *si;
@@ -455,7 +455,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * Initiate read into locked page and return.
 		 */
 		SetPageWorkingset(new_page);
-		lru_cache_add_anon(new_page);
+		if (activate)
+			SetPageActive(new_page);
+		lru_cache_add(new_page);
 		*new_page_allocated = true;
 		return new_page;
 	}
@@ -480,11 +482,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
+		struct vm_area_struct *vma, unsigned long addr, bool do_poll,
+		bool activate)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
-			vma, addr, &page_was_allocated);
+			vma, addr, &page_was_allocated, activate);
 
 	if (page_was_allocated)
 		swap_readpage(retpage, do_poll);
@@ -602,7 +605,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		/* Ok, do the async read-ahead now */
 		page = __read_swap_cache_async(
 			swp_entry(swp_type(entry), offset),
-			gfp_mask, vma, addr, &page_allocated);
+			gfp_mask, vma, addr, &page_allocated, offset == entry_offset);
 		if (!page)
 			continue;
 		if (page_allocated) {
@@ -618,7 +621,8 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
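
The archive truncates the rest of the patch at this point: the tail
of this mm/swap_state.c hunk and the mm/swapfile.c and mm/zswap.c
hunks are missing. For orientation, here is a condensed view of the
readahead-side decision visible in the hunk above. Variable names
match mainline swap_cluster_readahead() and the loop body is
abridged, so treat it as a sketch rather than the literal patched
code:

	/* Walk the readahead window around the faulting swap entry. */
	for (offset = start_offset; offset <= end_offset; offset++) {
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated,
			/* activate only the entry being faulted on */
			offset == entry_offset);
		if (!page)
			continue;
		if (page_allocated)
			swap_readpage(page, false);	/* async read */
		put_page(page);
	}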

Re: [Devel] [PATCH rh8] mm/swap: activate swapped in pages on fault

2020-10-22 Thread Andrey Ryabinin



On 10/19/20 7:32 PM, Konstantin Khorenko wrote:
> From: Andrey Ryabinin 
> 
> Move swapped-in anon pages directly to the active list. This
> should help prevent anon thrashing: recently swapped-in pages
> then have a better chance of staying in memory.
> 
> https://pmc.acronis.com/browse/VSTOR-20859
> Signed-off-by: Andrey Ryabinin 
> [VvS RHEL7.8 rebase] context changes
> 
> (cherry picked from vz7 commit 134cd9b20a914080539e6310f76fe3f7b32bc710)
> Signed-off-by: Konstantin Khorenko 

Reviewed-by: Andrey Ryabinin 