From: Joonsoo Kim <iamjoonsoo....@lge.com>

This patch implements workingset detection for the anonymous LRU.
All the infrastructure was implemented by the previous patches, so this
patch just activates workingset detection by installing/retrieving the
shadow entry and adding the refault calculation.
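
In short, reclaim now stores a shadow entry when an anonymous page is
evicted from the swap cache, and the swap-in paths consult it. A rough
sketch of the resulting flow, for illustration only (the exact context
is in the hunks below):

    /* eviction (__remove_mapping): record when/where the page left */
    if (reclaimed && !mapping_exiting(mapping))
            shadow = workingset_eviction(page, target_memcg);
    __delete_from_swap_cache(page, swap, shadow);

    /* swap-in (do_swap_page / __read_swap_cache_async): check refault */
    shadow = get_shadow_from_swap_cache(entry);
    if (shadow)
            workingset_refault(page, shadow);  /* may activate the page */
    lru_cache_add(page);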

Acked-by: Johannes Weiner <han...@cmpxchg.org>
Acked-by: Vlastimil Babka <vba...@suse.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 include/linux/swap.h |  6 ++++++
 mm/memory.c          | 11 ++++-------
 mm/swap_state.c      | 23 ++++++++++++++++++-----
 mm/vmscan.c          |  7 ++++---
 mm/workingset.c      | 15 +++++++++++----
 5 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8a4c592..6610469 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
 extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp);
 extern void __delete_from_swap_cache(struct page *page,
@@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page)
        return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                        gfp_t gfp_mask, void **shadowp)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 25769b6..4934dbc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3100,6 +3100,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        int locked;
        int exclusive = 0;
        vm_fault_t ret = 0;
+       void *shadow = NULL;
 
        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
                goto out;
@@ -3151,13 +3152,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                        goto out_page;
                                }
 
-                               /*
-                                * XXX: Move to lru_cache_add() when it
-                                * supports new vs putback
-                                */
-                               spin_lock_irq(&page_pgdat(page)->lru_lock);
-                               lru_note_cost_page(page);
-                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
+                               shadow = get_shadow_from_swap_cache(entry);
+                               if (shadow)
+                                       workingset_refault(page, shadow);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 13d8d66..146a86d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       struct address_space *address_space = swap_address_space(entry);
+       pgoff_t idx = swp_offset(entry);
+       struct page *page;
+
+       page = find_get_entry(address_space, idx);
+       if (xa_is_value(page))
+               return page;
+       if (page)
+               put_page(page);
+       return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
        struct swap_info_struct *si;
        struct page *page;
+       void *shadow = NULL;
 
        *new_page_allocated = false;
 
@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
        }
 
-       /* XXX: Move to lru_cache_add() when it supports new vs putback */
-       spin_lock_irq(&page_pgdat(page)->lru_lock);
-       lru_note_cost_page(page);
-       spin_unlock_irq(&page_pgdat(page)->lru_lock);
+       if (shadow)
+               workingset_refault(page, shadow);
 
        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b9b543e..9d4e28c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
        unsigned long flags;
        int refcount;
+       void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
-               __delete_from_swap_cache(page, swap, NULL);
+               if (reclaimed && !mapping_exiting(mapping))
+                       shadow = workingset_eviction(page, target_memcg);
+               __delete_from_swap_cache(page, swap, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
-               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
-               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
                /*
diff --git a/mm/workingset.c b/mm/workingset.c
index 2d77e4d..92e6611 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow)
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
-        * all the memory was available to the page cache. Whether
-        * cache can compete with anon or not depends on having swap.
+        * all the memory was available to the workingset. Whether
+        * workingset competition needs to consider anon or not depends
+        * on having swap.
         */
        workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+       if (!file) {
                workingset_size += lruvec_page_state(eviction_lruvec,
-                                                    NR_INACTIVE_ANON);
+                                                    NR_INACTIVE_FILE);
+       }
+       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
                workingset_size += lruvec_page_state(eviction_lruvec,
                                                     NR_ACTIVE_ANON);
+               if (file) {
+                       workingset_size += lruvec_page_state(eviction_lruvec,
+                                                    NR_INACTIVE_ANON);
+               }
        }
        if (refault_distance > workingset_size)
                goto out;
-- 
2.7.4
