The commit is pushed to "branch-rh7-3.10.0-514.6.1.vz7.28.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.6.1.vz7.28.6
------>
commit 31166ec0c438e0b8da2e41ba99661791f837e173
Author: Greg Thelen <gthe...@google.com>
Date:   Mon Feb 20 14:35:32 2017 +0400

    ms/memcg: make mem_cgroup_read_stat() unsigned
    
    commit 484ebb3b8c8b27dd2171696462a3116edb9ff801 upstream.
    
    mem_cgroup_read_stat() returns a page count by summing per-cpu page
    counters.  The summing is racy with respect to updates, so a transient
    negative sum is possible.  Callers don't want negative values:
    
     - mem_cgroup_wb_stats() doesn't want negative nr_dirty or nr_writeback.
       This could confuse dirty throttling.
    
     - oom reports and memory.stat shouldn't show confusing negative usage.
    
     - tree_usage() already avoids negatives.
    
    Avoid returning negative page counts from mem_cgroup_read_stat() and
    convert it to unsigned.
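    
    A minimal user-space sketch of the pattern (the per-CPU array, CPU
    count and reader below are illustrative stand-ins, not the kernel's
    per-cpu API):
    
        #include <stdio.h>
        
        #define NR_CPUS 4
        
        /* Per-CPU deltas; individual slots may legitimately be negative. */
        static long percpu_count[NR_CPUS];
        
        static unsigned long read_stat(void)
        {
                long val = 0;   /* signed accumulator for signed slots */
                int cpu;
        
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        val += percpu_count[cpu];
                /* A sum racing with updates may be transiently negative;
                 * clamp before returning it as an unsigned page count. */
                if (val < 0)
                        val = 0;
                return val;
        }
        
        int main(void)
        {
                /* Reader races with a page moving between CPUs: the
                 * decrement on CPU 1 is visible, the matching increment
                 * on CPU 0 is not yet. */
                percpu_count[1] = -1;
                /* prints 0 rather than a huge wrapped unsigned value */
                printf("pages: %lu\n", read_stat());
                return 0;
        }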
    
    [a...@linux-foundation.org: fix old typo while we're in there]
    Signed-off-by: Greg Thelen <gthe...@google.com>
    Cc: Johannes Weiner <han...@cmpxchg.org>
    Acked-by: Michal Hocko <mho...@suse.com>
    Cc: <sta...@vger.kernel.org>        [4.2+]
    Signed-off-by: Andrew Morton <a...@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>
    
    https://jira.sw.ru/browse/PSBM-56256
    Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 mm/memcontrol.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 49b69f7..95b1c0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -919,12 +919,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 }
 
 /*
+ * Return page count for single (non recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
  * synchronization to implement "quick" read. There are trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -934,17 +936,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-                                enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
        long val = 0;
        int cpu;
 
+       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_possible_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
+       /*
+        * Summing races with updates, so val may be negative.  Avoid exposing
+        * transient negative values.
+        */
+       if (val < 0)
+               val = 0;
        return val;
 }
 
@@ -1961,7 +1970,7 @@ done:
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                        if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                                continue;
-                       pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+                       pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
                }
 
@@ -4162,14 +4171,11 @@ static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
                                               enum mem_cgroup_stat_index idx)
 {
        struct mem_cgroup *iter;
-       long val = 0;
+       unsigned long val = 0;
 
-       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_mem_cgroup_tree(iter, memcg)
                val += mem_cgroup_read_stat(iter, idx);
 
-       if (val < 0) /* race ? */
-               val = 0;
        return val;
 }
 
@@ -5050,7 +5056,7 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
-               seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+               seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
 
@@ -5075,13 +5081,13 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
                           (u64)memsw * PAGE_SIZE);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               long long val = 0;
+               unsigned long long val = 0;
 
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-               seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+               seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
        }
 
        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {