From: Johannes Weiner <han...@cmpxchg.org>

Per-cgroup thrash detection will need to derive a live memcg from the
eviction cookie, and doing that inside unpack_shadow() will get nasty
with the reference handling spread over two functions.

In preparation, make unpack_shadow() clearly about extracting static
data, and let workingset_refault() do all the higher-level handling.

Signed-off-by: Johannes Weiner <han...@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavy...@virtuozzo.com>
Cc: Michal Hocko <mho...@suse.cz>
Cc: David Rientjes <rient...@google.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>

https://pmc.acronis.com/browse/VSTOR-19037
(cherry picked from commit 162453bfbdf4c0f58cb3058aad9ad8cda1044cda)
Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 mm/workingset.c | 56 ++++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)
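
In short, the resulting calling convention, condensed from the diff below
(all identifiers as in mm/workingset.c; a sketch only, not a hunk):

	unsigned long eviction, refault, refault_distance;
	struct zone *zone;

	/* unpack_shadow() now only decodes the static cookie data */
	unpack_shadow(shadow, &zone, &eviction);

	/* reading inactive_age and computing the distance move here */
	refault = atomic_long_read(&zone->inactive_age);
	refault_distance = (refault - eviction) & EVICTION_MASK;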

diff --git a/mm/workingset.c b/mm/workingset.c
index 150223b6b161..22b66359d92d 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -165,13 +165,10 @@ static void *pack_shadow(unsigned long eviction, struct zone *zone)
        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
-static void unpack_shadow(void *shadow,
-                         struct zone **zone,
-                         unsigned long *distance)
+static void unpack_shadow(void *shadow, struct zone **zonep,
+                         unsigned long *evictionp)
 {
        unsigned long entry = (unsigned long)shadow;
-       unsigned long eviction;
-       unsigned long refault;
        int zid, nid;
 
        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
@@ -179,29 +176,9 @@ static void unpack_shadow(void *shadow,
        entry >>= ZONES_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
-       eviction = entry;
-
-       *zone = NODE_DATA(nid)->node_zones + zid;
 
-       refault = atomic_long_read(&(*zone)->inactive_age);
-
-       /*
-        * The unsigned subtraction here gives an accurate distance
-        * across inactive_age overflows in most cases.
-        *
-        * There is a special case: usually, shadow entries have a
-        * short lifetime and are either refaulted or reclaimed along
-        * with the inode before they get too old.  But it is not
-        * impossible for the inactive_age to lap a shadow entry in
-        * the field, which can then result in a false small
-        * refault distance, leading to a false activation should this
-        * old entry actually refault again.  However, earlier kernels
-        * used to deactivate unconditionally with *every* reclaim
-        * invocation for the longest time, so the occasional
-        * inappropriate activation leading to pressure on the active
-        * list is not a problem.
-        */
-       *distance = (refault - eviction) & EVICTION_MASK;
+       *zonep = NODE_DATA(nid)->node_zones + zid;
+       *evictionp = entry;
 }
 
 /**
@@ -233,9 +210,32 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 bool workingset_refault(void *shadow)
 {
        unsigned long refault_distance;
+       unsigned long eviction;
+       unsigned long refault;
        struct zone *zone;
 
-       unpack_shadow(shadow, &zone, &refault_distance);
+       unpack_shadow(shadow, &zone, &eviction);
+
+       refault = atomic_long_read(&zone->inactive_age);
+
+       /*
+        * The unsigned subtraction here gives an accurate distance
+        * across inactive_age overflows in most cases.
+        *
+        * There is a special case: usually, shadow entries have a
+        * short lifetime and are either refaulted or reclaimed along
+        * with the inode before they get too old.  But it is not
+        * impossible for the inactive_age to lap a shadow entry in
+        * the field, which can then result in a false small
+        * refault distance, leading to a false activation should this
+        * old entry actually refault again.  However, earlier kernels
+        * used to deactivate unconditionally with *every* reclaim
+        * invocation for the longest time, so the occasional
+        * inappropriate activation leading to pressure on the active
+        * list is not a problem.
+        */
+       refault_distance = (refault - eviction) & EVICTION_MASK;
+
        inc_zone_state(zone, WORKINGSET_REFAULT);
 
        if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
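
The unsigned subtraction above stays accurate across an inactive_age wrap
because the difference is taken modulo the counter width. A standalone
sketch with a hypothetical 8-bit stand-in counter (illustration only, not
kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 8-bit stand-in for inactive_age */
		unsigned char eviction = 250;	/* counter when page was evicted */
		unsigned char refault = 4;	/* counter has wrapped past 255 */

		/* modular subtraction: (4 - 250) mod 256 == 10 ticks elapsed */
		unsigned char distance = refault - eviction;

		printf("%u\n", (unsigned)distance);	/* prints 10 */
		return 0;
	}
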
-- 
2.19.2

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel
