From: Johannes Weiner <han...@cmpxchg.org>

Cache thrash detection (see a528910e12ec "mm: thrash detection-based
file cache sizing" for details) currently only works on the system
level, not inside cgroups.  Worse, because refaults are compared to the
global number of active cache pages, a cgroup may wrongfully get all of
its refaults activated when its pages are hotter than those of other
cgroups.

Move the refault machinery from the zone to the lruvec, and then tag
eviction entries with the memcg ID.  This makes the thrash detection
work correctly inside cgroups.
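
As an illustration (not part of the patch), here is a stand-alone userspace
sketch of the shadow-entry layout that pack_shadow()/unpack_shadow() below
implement: the memcg ID sits between the bucketed eviction counter and the
node/zone bits.  The shift widths, the exceptional-entry tag bit and the raw
nid/zid parameters are simplified stand-ins for the kernel's
RADIX_TREE_EXCEPTIONAL_*, ZONES_SHIFT, NODES_SHIFT and MEM_CGROUP_ID_SHIFT
machinery; the bucket_order rounding is omitted and a 64-bit unsigned long is
assumed:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's shift constants. */
#define EXCEPTIONAL_SHIFT	2
#define EXCEPTIONAL_ENTRY	(1UL << 1)
#define ZONES_SHIFT		2
#define NODES_SHIFT		6
#define MEMCG_ID_SHIFT		16

/* Pack eviction counter, memcg ID, node and zone into one tagged word. */
static void *pack_shadow(int memcgid, int nid, int zid, unsigned long eviction)
{
	eviction = (eviction << MEMCG_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | nid;
	eviction = (eviction << ZONES_SHIFT) | zid;
	eviction <<= EXCEPTIONAL_SHIFT;

	return (void *)(eviction | EXCEPTIONAL_ENTRY);
}

/* Reverse the packing; fields come back out in the opposite order. */
static void unpack_shadow(void *shadow, int *memcgidp, int *nidp, int *zidp,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;

	entry >>= EXCEPTIONAL_SHIFT;
	*zidp = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	*nidp = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	*memcgidp = entry & ((1UL << MEMCG_ID_SHIFT) - 1);
	entry >>= MEMCG_ID_SHIFT;
	*evictionp = entry;
}

int main(void)
{
	int memcgid, nid, zid;
	unsigned long eviction;

	unpack_shadow(pack_shadow(42, 1, 2, 12345),
		      &memcgid, &nid, &zid, &eviction);
	assert(memcgid == 42 && nid == 1 && zid == 2 && eviction == 12345);
	printf("memcg=%d nid=%d zid=%d eviction=%lu\n",
	       memcgid, nid, zid, eviction);
	return 0;
}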

[sergey.senozhat...@gmail.com: do not return from workingset_activation() with locked rcu and page]
Signed-off-by: Johannes Weiner <han...@cmpxchg.org>
Signed-off-by: Sergey Senozhatsky <sergey.senozhat...@gmail.com>
Reviewed-by: Vladimir Davydov <vdavy...@virtuozzo.com>
Cc: Michal Hocko <mho...@suse.cz>
Cc: David Rientjes <rient...@google.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>

https://pmc.acronis.com/browse/VSTOR-19037
(cherry picked from commit 23047a96d7cfcfca1a6d026ecaec526ea4803e9e)
Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 include/linux/memcontrol.h | 53 +++++++++++++++++++---
 include/linux/mmzone.h     | 14 +++---
 mm/memcontrol.c            | 32 +++++++++----
 mm/vmscan.c                | 71 ++++++++++++++++++-----------
 mm/workingset.c            | 93 +++++++++++++++++++++++++++++++++-----
 5 files changed, 204 insertions(+), 59 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 640a5802e398..3e309ff5c3a7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -65,6 +65,10 @@ struct memcg_shrinker_map {
 #define MEM_CGROUP_RECLAIM_KMEM                (1 << MEM_CGROUP_RECLAIM_KMEM_BIT)
 
 #ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT    16
+#define MEM_CGROUP_ID_MAX      USHRT_MAX
+
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp);
 int mem_cgroup_try_charge_cache(struct page *page, struct mm_struct *mm,
@@ -93,6 +97,9 @@ extern struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
 
+unsigned short mem_cgroup_id(struct mem_cgroup *memcg);
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+
 static inline
 bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
 {
@@ -171,20 +178,21 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
        css_put(mem_cgroup_css(memcg));
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
+struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);
 
 extern atomic_t memcg_moving;
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
 {
        if (mem_cgroup_disabled())
-               return;
+               return NULL;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
-               __mem_cgroup_begin_update_page_stat(page, locked, flags);
+               return __mem_cgroup_begin_update_page_stat(page, locked, flags);
+       return NULL;
 }
 
 void __mem_cgroup_end_update_page_stat(struct page *page,
@@ -217,6 +225,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 
 void mem_cgroup_fill_vmstat(struct mem_cgroup *memcg, unsigned long *stats);
 
+unsigned long memcg_ws_activates(struct mem_cgroup *memcg);
+void memcg_inc_ws_activate(struct mem_cgroup *memcg);
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);
@@ -238,8 +249,29 @@ bool mem_cgroup_bad_page_check(struct page *page);
 void mem_cgroup_print_bad_page(struct page *page);
 #endif
 #else /* CONFIG_MEMCG */
+
+#define MEM_CGROUP_ID_SHIFT    0
+#define MEM_CGROUP_ID_MAX      0
+
 struct mem_cgroup;
 
+static inline bool mem_cgroup_disabled(void)
+{
+       return true;
+}
+
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+       WARN_ON_ONCE(id);
+       /* XXX: This should always return root_mem_cgroup */
+       return NULL;
+}
+
 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp)
@@ -339,9 +371,11 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
 {
 }
 
-static inline bool mem_cgroup_disabled(void)
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 {
-       return true;
+       WARN_ON_ONCE(id);
+       /* XXX: This should always return root_mem_cgroup */
+       return NULL;
 }
 
 static inline void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -445,6 +479,13 @@ static inline void mem_cgroup_fill_vmstat(struct mem_cgroup *memcg,
 {
 }
 
+static inline unsigned long memcg_ws_activates(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
+static inline void memcg_inc_ws_activate(struct mem_cgroup *memcg) { }
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4c613195158d..70e925d41445 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -211,10 +211,13 @@ struct zone_reclaim_stat {
 };
 
 struct lruvec {
-       struct list_head lists[NR_LRU_LISTS];
-       struct zone_reclaim_stat reclaim_stat;
+       struct list_head                lists[NR_LRU_LISTS];
+       struct zone_reclaim_stat        reclaim_stat;
+       /* Evictions & activations on the inactive file list */
+       atomic_long_t                   inactive_age;
+       unsigned long                   refaults;
 #ifdef CONFIG_MEMCG
-       struct zone *zone;
+       struct zone                     *zone;
 #endif
 };
 
@@ -418,9 +421,6 @@ struct zone {
        spinlock_t              lru_lock;
        struct lruvec           lruvec;
 
-       /* Evictions & activations on the inactive file list */
-       atomic_long_t           inactive_age;
-
        unsigned long           pages_scanned;     /* since last reclaim */
        unsigned long           flags;             /* zone flags, see below */
 
@@ -911,6 +911,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec)
 #endif
 }
 
+extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 98cf7b56eaaa..2985022c4f2a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -110,6 +110,7 @@ enum mem_cgroup_stat2_index {
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_SLAB_RECLAIMABLE, /* # of reclaimable slab pages */
+       MEM_CGROUP_STAT_WS_ACTIVATE,
        MEM_CGROUP_STAT2_NSTATS,
 };
 
@@ -125,6 +126,7 @@ static const char * const mem_cgroup_stat2_names[] = {
        "cache",
        "rss",
        "slab_reclaimable",
+       "workingset_activate",
 };
 
 enum mem_cgroup_events_index {
@@ -170,10 +172,7 @@ enum mem_cgroup_events_target {
 #define SOFTLIMIT_EVENTS_TARGET 1024
 #define NUMAINFO_EVENTS_TARGET 1024
 
-#define MEM_CGROUP_ID_MAX      USHRT_MAX
-
 static void mem_cgroup_id_put(struct mem_cgroup *memcg);
-static unsigned short mem_cgroup_id(struct mem_cgroup *memcg);
 
 struct mem_cgroup_stat_cpu {
        long count[MEM_CGROUP_STAT_NSTATS];
@@ -1137,6 +1136,16 @@ mem_cgroup_read_stat2(struct mem_cgroup *memcg, enum mem_cgroup_stat2_index idx)
        return percpu_counter_sum_positive(&memcg->stat2.counters[idx]);
 }
 
+unsigned long memcg_ws_activates(struct mem_cgroup *memcg)
+{
+       return percpu_counter_read_positive(&memcg->stat2.counters[MEM_CGROUP_STAT_WS_ACTIVATE]);
+}
+
+void memcg_inc_ws_activate(struct mem_cgroup *memcg)
+{
+       percpu_counter_inc(&memcg->stat2.counters[MEM_CGROUP_STAT_WS_ACTIVATE]);
+}
+
 static void mem_cgroup_update_swap_max(struct mem_cgroup *memcg)
 {
        long long swap;
@@ -2244,7 +2253,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                        pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
                }
-               for (i = 0; i < MEM_CGROUP_STAT2_NSTATS; i++) {
+               for (i = 0; i < MEM_CGROUP_STAT_WS_ACTIVATE; i++) {
                        pr_cont(" %s:%luKB", mem_cgroup_stat2_names[i],
                                K(mem_cgroup_read_stat2(iter, i)));
                }
@@ -2694,7 +2703,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
  * account and taking the move_lock in the slowpath.
  */
 
-void __mem_cgroup_begin_update_page_stat(struct page *page,
+struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page,
                                bool *locked, unsigned long *flags)
 {
        struct mem_cgroup *memcg;
@@ -2704,7 +2713,7 @@ void __mem_cgroup_begin_update_page_stat(struct page *page,
 again:
        memcg = pc->mem_cgroup;
        if (unlikely(!memcg || !PageCgroupUsed(pc)))
-               return;
+               return NULL;
        /*
         * If this memory cgroup is not under account moving, we don't
         * need to take move_lock_mem_cgroup(). Because we already hold
@@ -2712,7 +2721,7 @@ void __mem_cgroup_begin_update_page_stat(struct page *page,
         * rcu_read_unlock() if mem_cgroup_stolen() == true.
         */
        if (!mem_cgroup_stolen(memcg))
-               return;
+               return NULL;
 
        move_lock_mem_cgroup(memcg, flags);
        if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
@@ -2720,6 +2729,7 @@ void __mem_cgroup_begin_update_page_stat(struct page *page,
                goto again;
        }
        *locked = true;
+       return memcg;
 }
 
 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
@@ -5422,10 +5432,12 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
                seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
-       for (i = 0; i < MEM_CGROUP_STAT2_NSTATS; i++) {
+       for (i = 0; i < MEM_CGROUP_STAT_WS_ACTIVATE; i++) {
                seq_printf(m, "%s %lu\n", mem_cgroup_stat2_names[i],
                           mem_cgroup_read_stat2(memcg, i) * PAGE_SIZE);
        }
+       seq_printf(m, "%s %lu\n", mem_cgroup_stat2_names[i],
+               mem_cgroup_read_stat2(memcg, i));
 
        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
                seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
@@ -6191,8 +6203,10 @@ static struct cftype memsw_cgroup_files[] = {
 
 static DEFINE_IDR(mem_cgroup_idr);
 
-static unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 {
+       if (mem_cgroup_disabled())
+               return 0;
        return memcg->id;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 27aff034ce40..87384a4fb436 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -268,7 +268,7 @@ bool zone_reclaimable(struct zone *zone)
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        if (!mem_cgroup_disabled())
                return mem_cgroup_get_lru_size(lruvec, lru);
@@ -2018,15 +2018,15 @@ static int inactive_list_is_low(struct lruvec *lruvec, bool file,
        if (!file && !total_swap_pages)
                return false;
 
-       inactive = get_lru_size(lruvec, file * LRU_FILE);
-       active = get_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
+       inactive = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
+       active = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
 
        if (memcg)
-               refaults = zone->refaults; /* we don't support per-cgroup workingset */
+               refaults = memcg_ws_activates(memcg);
         else
                refaults = zone_page_state(zone, WORKINGSET_ACTIVATE);
 
-       if (file && actual_reclaim && zone->refaults != refaults) {
+       if (file && actual_reclaim && lruvec->refaults != refaults) {
                inactive_ratio = 0;
        } else {
                gb = (inactive + active) >> (30 - PAGE_SHIFT);
@@ -2039,12 +2039,12 @@ static int inactive_list_is_low(struct lruvec *lruvec, bool file,
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-                                struct lruvec *lruvec, struct scan_control *sc)
+                                struct lruvec *lruvec, struct mem_cgroup *memcg,
+                                struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
                if (sc->may_thrash &&
-                   inactive_list_is_low(lruvec, is_file_lru(lru),
-                                        sc->target_mem_cgroup, true))
+                   inactive_list_is_low(lruvec, is_file_lru(lru), memcg, true))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
@@ -2075,12 +2075,12 @@ static void zone_update_force_scan(struct zone *zone)
                struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
                unsigned long size;
 
-               size = max(get_lru_size(lruvec, LRU_ACTIVE_FILE),
-                          get_lru_size(lruvec, LRU_INACTIVE_FILE));
+               size = max(lruvec_lru_size(lruvec, LRU_ACTIVE_FILE),
+                          lruvec_lru_size(lruvec, LRU_INACTIVE_FILE));
                if (get_nr_swap_pages() > 0)
                        size = max3(size,
-                                   get_lru_size(lruvec, LRU_ACTIVE_ANON),
-                                   get_lru_size(lruvec, LRU_INACTIVE_ANON));
+                                   lruvec_lru_size(lruvec, LRU_ACTIVE_ANON),
+                                   lruvec_lru_size(lruvec, LRU_INACTIVE_ANON));
 
                if (size && size >> DEF_PRIORITY == 0)
                        tiny++;
@@ -2178,10 +2178,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                goto out;
        }
 
-       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
-               get_lru_size(lruvec, LRU_INACTIVE_ANON);
-       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
-               get_lru_size(lruvec, LRU_INACTIVE_FILE);
+       anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
+               lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
+       file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
+               lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
 
        /*
         * Prevent the reclaimer from falling into the cache trap: as
@@ -2208,8 +2208,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * There is enough inactive page cache, do not reclaim
         * anything from the anonymous working set right now.
         */
-       if (!inactive_list_is_low(lruvec, true, sc->target_mem_cgroup, false) &&
-           get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority > 0) {
+       if (!inactive_list_is_low(lruvec, true, memcg, false) &&
+           lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -2267,7 +2267,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                unsigned long size;
                unsigned long scan;
 
-               size = get_lru_size(lruvec, lru);
+               size = lruvec_lru_size(lruvec, lru);
                scan = size >> sc->priority;
 
                if (!scan && force_scan)
@@ -2368,7 +2368,7 @@ static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
                                nr[lru] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(lru, nr_to_scan,
-                                                           lruvec, sc);
+                                                       lruvec, memcg, sc);
                        }
                }
 
@@ -2433,7 +2433,7 @@ static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false, sc->target_mem_cgroup, true))
+       if (inactive_list_is_low(lruvec, false, memcg, true))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 
@@ -2788,6 +2788,25 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
        return aborted_reclaim;
 }
 
+static void snapshot_refaults(struct mem_cgroup *root_memcg, struct zone *zone)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
+       do {
+               unsigned long refaults;
+               struct lruvec *lruvec;
+
+               if (memcg)
+                       refaults = memcg_ws_activates(memcg);
+               else
+                       refaults = zone_page_state(zone, WORKINGSET_ACTIVATE);
+
+               lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+               lruvec->refaults = refaults;
+       } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
+}
+
 /* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                struct scan_control *sc)
@@ -2912,10 +2931,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        } while (--sc->priority >= 0 && !aborted_reclaim);
 
 out:
-       if (!sc->target_mem_cgroup)
-               for_each_zone_zonelist_nodemask(zone, z, zonelist,
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask)
-                       zone->refaults = zone_page_state(zone, WORKINGSET_ACTIVATE);
+               snapshot_refaults(sc->target_mem_cgroup, zone);
 
        delayacct_freepages_end();
        KSTAT_PERF_LEAVE(ttfp);}
@@ -3211,8 +3229,7 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
        do {
                struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
-               if (inactive_list_is_low(lruvec, false,
-                                       sc->target_mem_cgroup, true))
+               if (inactive_list_is_low(lruvec, false, memcg, true))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);
 
@@ -3610,7 +3627,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                if (!populated_zone(zone))
                        continue;
 
-               zone->refaults = zone_page_state(zone, WORKINGSET_ACTIVATE);
+               snapshot_refaults(NULL, zone);
        }
 
        /*
diff --git a/mm/workingset.c b/mm/workingset.c
index 45b98d9eaf78..485bbd84b230 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -7,6 +7,7 @@
 #include <linux/memcontrol.h>
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
+#include <linux/page_cgroup.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/swap.h>
@@ -153,7 +154,8 @@
  */
 
 #define EVICTION_SHIFT (RADIX_TREE_EXCEPTIONAL_ENTRY + \
-                        ZONES_SHIFT + NODES_SHIFT)
+                        ZONES_SHIFT + NODES_SHIFT +    \
+                        MEM_CGROUP_ID_SHIFT)
 #define EVICTION_MASK  (~0UL >> EVICTION_SHIFT)
 
 /*
@@ -166,9 +168,10 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(unsigned long eviction, struct zone *zone)
+static void *pack_shadow(int memcgid, struct zone *zone, unsigned long eviction)
 {
        eviction >>= bucket_order;
+       eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
        eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
        eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
@@ -176,18 +179,21 @@ static void *pack_shadow(unsigned long eviction, struct zone *zone)
        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
-static void unpack_shadow(void *shadow, struct zone **zonep,
+static void unpack_shadow(void *shadow, int *memcgidp, struct zone **zonep,
                          unsigned long *evictionp)
 {
        unsigned long entry = (unsigned long)shadow;
-       int zid, nid;
+       int memcgid, nid, zid;
 
        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
        zid = entry & ((1UL << ZONES_SHIFT) - 1);
        entry >>= ZONES_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
+       memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
+       entry >>= MEM_CGROUP_ID_SHIFT;
 
+       *memcgidp = memcgid;
        *zonep = NODE_DATA(nid)->node_zones + zid;
        *evictionp = entry << bucket_order;
 }
@@ -202,11 +208,25 @@ static void unpack_shadow(void *shadow, struct zone **zonep,
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
 {
+       struct mem_cgroup *memcg;
        struct zone *zone = page_zone(page);
+       struct page_cgroup *pc;
        unsigned long eviction;
-
-       eviction = atomic_long_inc_return(&zone->inactive_age);
-       return pack_shadow(eviction, zone);
+       struct lruvec *lruvec;
+
+       /* Page is fully exclusive and pins page->mem_cgroup */
+       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_PAGE(page_count(page), page);
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+       pc = lookup_page_cgroup(page);
+       if (PageCgroupUsed(pc))
+               memcg = pc->mem_cgroup;
+       else
+               memcg = root_mem_cgroup;
+       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+       eviction = atomic_long_inc_return(&lruvec->inactive_age);
+       return pack_shadow(mem_cgroup_id(memcg), zone, eviction);
 }
 
 /**
@@ -221,13 +241,42 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 bool workingset_refault(void *shadow)
 {
        unsigned long refault_distance;
+       unsigned long active_file;
+       struct mem_cgroup *memcg;
        unsigned long eviction;
+       struct lruvec *lruvec;
        unsigned long refault;
        struct zone *zone;
+       int memcgid;
 
-       unpack_shadow(shadow, &zone, &eviction);
+       unpack_shadow(shadow, &memcgid, &zone, &eviction);
 
-       refault = atomic_long_read(&zone->inactive_age);
+       rcu_read_lock();
+       /*
+        * Look up the memcg associated with the stored ID. It might
+        * have been deleted since the page's eviction.
+        *
+        * Note that in rare events the ID could have been recycled
+        * for a new cgroup that refaults a shared page. This is
+        * impossible to tell from the available data. However, this
+        * should be a rare and limited disturbance, and activations
+        * are always speculative anyway. Ultimately, it's the aging
+        * algorithm's job to shake out the minimum access frequency
+        * for the active cache.
+        *
+        * XXX: On !CONFIG_MEMCG, this will always return NULL; it
+        * would be better if the root_mem_cgroup existed in all
+        * configurations instead.
+        */
+       memcg = mem_cgroup_from_id(memcgid);
+       if (!mem_cgroup_disabled() && !memcg) {
+               rcu_read_unlock();
+               return false;
+       }
+       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+       refault = atomic_long_read(&lruvec->inactive_age);
+       active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
+       rcu_read_unlock();
 
        /*
         * The unsigned subtraction here gives an accurate distance
@@ -249,7 +298,8 @@ bool workingset_refault(void *shadow)
 
        inc_zone_state(zone, WORKINGSET_REFAULT);
 
-       if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
+       if (refault_distance <= active_file) {
+               memcg_inc_ws_activate(memcg);
                inc_zone_state(zone, WORKINGSET_ACTIVATE);
                return true;
        }
@@ -262,7 +312,28 @@ bool workingset_refault(void *shadow)
  */
 void workingset_activation(struct page *page)
 {
-       atomic_long_inc(&page_zone(page)->inactive_age);
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+       bool locked;
+       unsigned long flags;
+
+       /*
+        * Filter non-memcg pages here, e.g. unmap can call
+        * mark_page_accessed() on VDSO pages.
+        *
+        * XXX: See workingset_refault() - this should return
+        * root_mem_cgroup even for !CONFIG_MEMCG.
+        */
+       if (!mem_cgroup_disabled())
+               return;
+
+       memcg = mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+       if (!memcg)
+               return;
+
+       lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
+       atomic_long_inc(&lruvec->inactive_age);
+       mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
-- 
2.19.2

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel
