We need to show dcachesize stats via the UB API. Since we also have to record max usage, we use a plain res_counter instead of per-cpu counters. The following patch will make the beancounter code pull this value from memcg whenever necessary.
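For illustration only, a minimal sketch of what the beancounter read side could look like (the helper below is hypothetical and not part of this patch): since res_counter already tracks both current and maximum usage, dcachesize held/maxheld can be read with plain res_counter_read_u64() calls.

	/*
	 * Hypothetical helper (illustration only): how the ubc code could pull
	 * dcachesize held/maxheld from the new memcg->dcache res_counter. It
	 * would have to live in mm/memcontrol.c, since struct mem_cgroup is
	 * private to that file.
	 */
	static void mem_cgroup_get_dcache_stat(struct mem_cgroup *memcg,
					       u64 *held, u64 *maxheld)
	{
		*held = res_counter_read_u64(&memcg->dcache, RES_USAGE);
		*maxheld = res_counter_read_u64(&memcg->dcache, RES_MAX_USAGE);
	}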
Signed-off-by: Vladimir Davydov <vdavy...@parallels.com>
---
 include/linux/memcontrol.h |  3 ---
 mm/memcontrol.c            | 44 ++++++++++++++++++++++++++++++++++++++++++--
 mm/slab.h                  |  8 +++++---
 3 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1382e4939a21..d434d6c2cf27 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -487,9 +487,6 @@ void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
-
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
  * @gfp: the gfp allocation flags.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d8f9b5561222..1871108bba17 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -279,6 +279,14 @@ struct mem_cgroup {
 	 */
 	struct res_counter kmem;
 
+	/*
+	 * the counter to account for dcache usage.
+	 *
+	 * Never limited, only needed for showing stats. We could use a per cpu
+	 * counter if we did not have to report max usage.
+	 */
+	struct res_counter dcache;
+
 	/* beancounter-related stats */
 	unsigned long long swap_max;
 	atomic_long_t mem_failcnt;
@@ -3140,7 +3148,7 @@ static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
 }
 #endif
 
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
+static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
@@ -3188,7 +3196,7 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	return ret;
 }
 
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
+static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 {
 	res_counter_uncharge(&memcg->res, size);
 	if (do_swap_account)
@@ -3210,6 +3218,35 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 	css_put(&memcg->css);
 }
 
+int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size)
+{
+	struct mem_cgroup *memcg;
+	struct res_counter *fail_res;
+	int ret;
+
+	VM_BUG_ON(is_root_cache(s));
+	memcg = s->memcg_params.memcg;
+
+	ret = memcg_charge_kmem(memcg, gfp, size);
+	if (ret)
+		return ret;
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		res_counter_charge_nofail(&memcg->dcache, size, &fail_res);
+	return 0;
+}
+
+void __memcg_uncharge_slab(struct kmem_cache *s, unsigned size)
+{
+	struct mem_cgroup *memcg;
+
+	VM_BUG_ON(is_root_cache(s));
+	memcg = s->memcg_params.memcg;
+
+	memcg_uncharge_kmem(memcg, size);
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		res_counter_uncharge(&memcg->dcache, size);
+}
+
 /*
  * helper for acessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
@@ -6345,6 +6382,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 		res_counter_init(&memcg->res, NULL);
 		res_counter_init(&memcg->memsw, NULL);
 		res_counter_init(&memcg->kmem, NULL);
+		res_counter_init(&memcg->dcache, NULL);
 	}
 
 	memcg->last_scanned_node = MAX_NUMNODES;
@@ -6386,6 +6424,7 @@ mem_cgroup_css_online(struct cgroup *cont)
 		res_counter_init(&memcg->res, &parent->res);
 		res_counter_init(&memcg->memsw, &parent->memsw);
 		res_counter_init(&memcg->kmem, &parent->kmem);
+		res_counter_init(&memcg->dcache, &parent->dcache);
 
 		/*
 		 * No need to take a reference to the parent because cgroup
@@ -6395,6 +6434,7 @@ mem_cgroup_css_online(struct cgroup *cont)
 		res_counter_init(&memcg->res, NULL);
 		res_counter_init(&memcg->memsw, NULL);
 		res_counter_init(&memcg->kmem, NULL);
+		res_counter_init(&memcg->dcache, NULL);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
diff --git a/mm/slab.h b/mm/slab.h
index 390a0b6f3760..3d1b33ec732b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -217,6 +217,9 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 	return s->memcg_params.root_cache;
 }
 
+extern int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size);
+extern void __memcg_uncharge_slab(struct kmem_cache *s, unsigned size);
+
 static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
 {
@@ -224,8 +227,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params.memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, PAGE_SIZE << order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -234,7 +236,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params.memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, PAGE_SIZE << order);
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
-- 
1.7.10.4
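Side note on the SLAB_RECLAIM_ACCOUNT test in __memcg_charge_slab()/__memcg_uncharge_slab(): reclaimable caches such as the dentry and inode caches are created with SLAB_RECLAIM_ACCOUNT, which is what lets this flag serve as a rough proxy for dcache allocations here. For reference (quoted from upstream fs/dcache.c, not part of this patch), the dentry cache creation looks roughly like:

	/* fs/dcache.c: the dentry slab carries SLAB_RECLAIM_ACCOUNT, so with
	 * this patch its pages are also charged to memcg->dcache. */
	dentry_cache = KMEM_CACHE(dentry,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);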