Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 05:07 PM, Kamezawa Hiroyuki wrote: > (2012/11/07 17:41), Sha Zhengju wrote: >> From: Sha Zhengju >> >> Current, when a memcg oom is happening the oom dump messages is still global >> state and provides few useful info for users. This patch prints more pointed >> memcg page statistics for memcg-oom. >> >> Signed-off-by: Sha Zhengju >> Cc: Michal Hocko >> Cc: KAMEZAWA Hiroyuki >> Cc: David Rientjes >> Cc: Andrew Morton >> --- >> mm/memcontrol.c | 71 >> --- >> mm/oom_kill.c |6 +++- >> 2 files changed, 66 insertions(+), 11 deletions(-) >> >> diff --git a/mm/memcontrol.c b/mm/memcontrol.c >> index 0eab7d5..2df5e72 100644 >> --- a/mm/memcontrol.c >> +++ b/mm/memcontrol.c >> @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { >> "pgmajfault", >> }; >> >> +static const char * const mem_cgroup_lru_names[] = { >> +"inactive_anon", >> +"active_anon", >> +"inactive_file", >> +"active_file", >> +"unevictable", >> +}; >> + > Is this for the same strings with show_free_areas() ? > I just move the declaration here from the bottom of source file to make the following use error-free. >> /* >>* Per memcg event counter is incremented at every pagein/pageout. With >> THP, >>* it will be incremated by the number of pages. This counter is used for >> @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup >> *memcg, >> spin_unlock_irqrestore(>move_lock, *flags); >> } >> >> +#define K(x) ((x) << (PAGE_SHIFT-10)) >> +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) >> +{ >> +struct mem_cgroup *mi; >> +unsigned int i; >> + >> +if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { > Why do you need to have this condition check ? > Yes, the check is unnecessary... I'll remove it next version. 
>> +for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { >> +if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) >> +continue; >> +printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], >> +K(mem_cgroup_read_stat(memcg, i))); > Hm, how about using the same style with show_free_areas() ? > I'm also trying do so. show_free_areas() prints the memory related info in two style: one is in page unit and the oher is in KB (I've no idea why we distinct them), but I think the KB format is more readable. >> +} >> + >> +for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) >> +printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], >> +mem_cgroup_read_events(memcg, i)); >> + > I don't think EVENTS info is useful for oom. > It seems you're right. : ) >> +for (i = 0; i < NR_LRU_LISTS; i++) >> +printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], >> +K(mem_cgroup_nr_lru_pages(memcg, BIT(i; > How far does your new information has different format than usual oom ? > Could you show a sample and difference in changelog ? > > Of course, I prefer both of them has similar format. > > > The new memcg-oom info excludes global state out and prints the memcg statistics instead which seems more brevity. I'll add a sample next time. Thanks for reminding me! Thanks, Sha -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 06:17 AM, Michal Hocko wrote: On Wed 07-11-12 16:41:36, Sha Zhengju wrote: From: Sha Zhengju Current, when a memcg oom is happening the oom dump messages is still global state and provides few useful info for users. This patch prints more pointed memcg page statistics for memcg-oom. Signed-off-by: Sha Zhengju Cc: Michal Hocko Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: Andrew Morton --- mm/memcontrol.c | 71 --- mm/oom_kill.c |6 +++- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c [...] @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(>move_lock, *flags); } +#define K(x) ((x)<< (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg->use_hierarchy&& memcg != root_mem_cgroup) { + for (i = 0; i< MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP&& !do_swap_account) + continue; + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i< MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i< NR_LRU_LISTS; i++) + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; + } else { + + for (i = 0; i< MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP&& !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT "%s:%lldKB ", mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i< MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + printk(KERN_CONT "%s:%llu ", + mem_cgroup_events_names[i], val); + } + + for 
(i = 0; i< NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], K(val)); + } + } This is just plain ugly. for_each_mem_cgroup_tree is use_hierarchy aware and there is no need for if (use_hierarchy) part. memcg != root_mem_cgroup test doesn't make much sense as well because we call that a global oom killer ;) Yes... bitterly did I repent the patch... The else-part of for_each_mem_cgroup_tree is enough for hierarchy. I'll send a update one later. Sorry for the noise. : ( Thanks, Sha -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Thu 08-11-12 20:37:45, Sha Zhengju wrote: > On 11/08/2012 02:02 AM, David Rientjes wrote: > >On Wed, 7 Nov 2012, Sha Zhengju wrote: [..] > >>+ else > >>+ show_mem(SHOW_MEM_FILTER_NODES); > >Well that's disappointing if memcg == root_mem_cgroup, we'd probably like > >to know the global memory state to determine what the problem is. > > > > I am really wondering if there is any case that can pass > root_mem_cgroup down here. No it cannot because the root cgroup doesn't have any limit so we cannot trigger memcg oom killer. -- Michal Hocko SUSE Labs -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 02:02 AM, David Rientjes wrote: On Wed, 7 Nov 2012, Sha Zhengju wrote: diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { "pgmajfault", }; +static const char * const mem_cgroup_lru_names[] = { + "inactive_anon", + "active_anon", + "inactive_file", + "active_file", + "unevictable", +}; + /* * Per memcg event counter is incremented at every pagein/pageout. With THP, * it will be incremated by the number of pages. This counter is used for @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(>move_lock, *flags); } +#define K(x) ((x)<< (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg->use_hierarchy&& memcg != root_mem_cgroup) { + for (i = 0; i< MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP&& !do_swap_account) + continue; + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], This printk isn't continuing any previous printk, so using KERN_CONT here will require a short header to be printed first ("Memcg: "?) with KERN_INFO before the iterations. Yep...I think I lost it while rebasing... sorry for the stupid mistake. + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i< MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i< NR_LRU_LISTS; i++) + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; + } else { + Spurious newline. Eek, is there really no way to avoid this if-conditional and just use for_each_mem_cgroup_tree() for everything and use mem_cgroup_iter_break(memcg, iter); break; for !memcg->use_hierarchy? Now I'm shamed at my bad brain of yesterday by sending this chunk out... 
Yes, the if-part code above is obviously unwanted, and the for_each_mem_cgroup_tree can handle hierarchy already. + for (i = 0; i< MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP&& !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT "%s:%lldKB ", mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i< MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + printk(KERN_CONT "%s:%llu ", + mem_cgroup_events_names[i], val); + } + + for (i = 0; i< NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], K(val)); + } + } + printk(KERN_CONT "\n"); +} /** - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. * @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1569,6 +1628,8 @@ done: res_counter_read_u64(>kmem, RES_USAGE)>> 10, res_counter_read_u64(>kmem, RES_LIMIT)>> 10, res_counter_read_u64(>kmem, RES_FAILCNT)); + + mem_cgroup_print_oom_stat(memcg); I think this should be folded into mem_cgroup_print_oom_info(), I don't see a need for a new function. 
} /* @@ -5195,14 +5256,6 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft, } #endif /* CONFIG_NUMA */ -static const char * const mem_cgroup_lru_names[] = { - "inactive_anon", - "active_anon", - "inactive_file", - "active_file", - "unevictable", -}; - static inline void mem_cgroup_lru_names_not_uptodate(void) { BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7e9e911..4b8a6dd 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -421,8 +421,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, cpuset_print_task_mems_allowed(current);
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
(2012/11/07 17:41), Sha Zhengju wrote: > From: Sha Zhengju > > Current, when a memcg oom is happening the oom dump messages is still global > state and provides few useful info for users. This patch prints more pointed > memcg page statistics for memcg-oom. > > Signed-off-by: Sha Zhengju > Cc: Michal Hocko > Cc: KAMEZAWA Hiroyuki > Cc: David Rientjes > Cc: Andrew Morton > --- > mm/memcontrol.c | 71 > --- > mm/oom_kill.c |6 +++- > 2 files changed, 66 insertions(+), 11 deletions(-) > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index 0eab7d5..2df5e72 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { > "pgmajfault", > }; > > +static const char * const mem_cgroup_lru_names[] = { > + "inactive_anon", > + "active_anon", > + "inactive_file", > + "active_file", > + "unevictable", > +}; > + Is this for the same strings with show_free_areas() ? > /* >* Per memcg event counter is incremented at every pagein/pageout. With THP, >* it will be incremated by the number of pages. This counter is used for > @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup > *memcg, > spin_unlock_irqrestore(>move_lock, *flags); > } > > +#define K(x) ((x) << (PAGE_SHIFT-10)) > +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) > +{ > + struct mem_cgroup *mi; > + unsigned int i; > + > + if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { Why do you need to have this condition check ? > + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], > + K(mem_cgroup_read_stat(memcg, i))); Hm, how about using the same style with show_free_areas() ? > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) > + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], > + mem_cgroup_read_events(memcg, i)); > + I don't think EVENTS info is useful for oom. 
> + for (i = 0; i < NR_LRU_LISTS; i++) > + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], > + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; How far does your new information has different format than usual oom ? Could you show a sample and difference in changelog ? Of course, I prefer both of them has similar format. > + } else { > + > + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + long long val = 0; > + > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_read_stat(mi, i); > + printk(KERN_CONT "%s:%lldKB ", > mem_cgroup_stat_names[i], K(val)); > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_read_events(mi, i); > + printk(KERN_CONT "%s:%llu ", > + mem_cgroup_events_names[i], val); > + } > + > + for (i = 0; i < NR_LRU_LISTS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); > + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], > K(val)); > + } > + } > + printk(KERN_CONT "\n"); > +} > /** > - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in > read mode. >* @memcg: The memory cgroup that went over limit >* @p: Task that is going to be killed >* > @@ -1569,6 +1628,8 @@ done: > res_counter_read_u64(>kmem, RES_USAGE) >> 10, > res_counter_read_u64(>kmem, RES_LIMIT) >> 10, > res_counter_read_u64(>kmem, RES_FAILCNT)); > + > + mem_cgroup_print_oom_stat(memcg); > } please put directly in print_oom_info() Thanks, -Kame -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
(2012/11/07 17:41), Sha Zhengju wrote: From: Sha Zhengju handai@taobao.com Current, when a memcg oom is happening the oom dump messages is still global state and provides few useful info for users. This patch prints more pointed memcg page statistics for memcg-oom. Signed-off-by: Sha Zhengju handai@taobao.com Cc: Michal Hocko mho...@suse.cz Cc: KAMEZAWA Hiroyuki kamezawa.hir...@jp.fujitsu.com Cc: David Rientjes rient...@google.com Cc: Andrew Morton a...@linux-foundation.org --- mm/memcontrol.c | 71 --- mm/oom_kill.c |6 +++- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { pgmajfault, }; +static const char * const mem_cgroup_lru_names[] = { + inactive_anon, + active_anon, + inactive_file, + active_file, + unevictable, +}; + Is this for the same strings with show_free_areas() ? /* * Per memcg event counter is incremented at every pagein/pageout. With THP, * it will be incremated by the number of pages. This counter is used for @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(memcg-move_lock, *flags); } +#define K(x) ((x) (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg-use_hierarchy memcg != root_mem_cgroup) { Why do you need to have this condition check ? + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + printk(KERN_CONT %s:%ldKB , mem_cgroup_stat_names[i], + K(mem_cgroup_read_stat(memcg, i))); Hm, how about using the same style with show_free_areas() ? + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT %s:%lu , mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + I don't think EVENTS info is useful for oom. 
+ for (i = 0; i NR_LRU_LISTS; i++) + printk(KERN_CONT %s:%luKB , mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; How far does your new information has different format than usual oom ? Could you show a sample and difference in changelog ? Of course, I prefer both of them has similar format. + } else { + + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT %s:%lldKB , mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + printk(KERN_CONT %s:%llu , + mem_cgroup_events_names[i], val); + } + + for (i = 0; i NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT %s:%lluKB , mem_cgroup_lru_names[i], K(val)); + } + } + printk(KERN_CONT \n); +} /** - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. * @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1569,6 +1628,8 @@ done: res_counter_read_u64(memcg-kmem, RES_USAGE) 10, res_counter_read_u64(memcg-kmem, RES_LIMIT) 10, res_counter_read_u64(memcg-kmem, RES_FAILCNT)); + + mem_cgroup_print_oom_stat(memcg); } please put directly in print_oom_info() Thanks, -Kame -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 02:02 AM, David Rientjes wrote: On Wed, 7 Nov 2012, Sha Zhengju wrote: diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { pgmajfault, }; +static const char * const mem_cgroup_lru_names[] = { + inactive_anon, + active_anon, + inactive_file, + active_file, + unevictable, +}; + /* * Per memcg event counter is incremented at every pagein/pageout. With THP, * it will be incremated by the number of pages. This counter is used for @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(memcg-move_lock, *flags); } +#define K(x) ((x) (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg-use_hierarchy memcg != root_mem_cgroup) { + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + printk(KERN_CONT %s:%ldKB , mem_cgroup_stat_names[i], This printk isn't continuing any previous printk, so using KERN_CONT here will require a short header to be printed first (Memcg: ?) with KERN_INFO before the iterations. Yep...I think I lost it while rebasing... sorry for the stupid mistake. + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT %s:%lu , mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i NR_LRU_LISTS; i++) + printk(KERN_CONT %s:%luKB , mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; + } else { + Spurious newline. Eek, is there really no way to avoid this if-conditional and just use for_each_mem_cgroup_tree() for everything and use mem_cgroup_iter_break(memcg, iter); break; for !memcg-use_hierarchy? Now I'm shamed at my bad brain of yesterday by sending this chunk out... 
Yes, the if-part code above is obviously unwanted, and the for_each_mem_cgroup_tree can handle hierarchy already. + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT %s:%lldKB , mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + printk(KERN_CONT %s:%llu , + mem_cgroup_events_names[i], val); + } + + for (i = 0; i NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT %s:%lluKB , mem_cgroup_lru_names[i], K(val)); + } + } + printk(KERN_CONT \n); +} /** - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. * @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1569,6 +1628,8 @@ done: res_counter_read_u64(memcg-kmem, RES_USAGE) 10, res_counter_read_u64(memcg-kmem, RES_LIMIT) 10, res_counter_read_u64(memcg-kmem, RES_FAILCNT)); + + mem_cgroup_print_oom_stat(memcg); I think this should be folded into mem_cgroup_print_oom_info(), I don't see a need for a new function. 
} /* @@ -5195,14 +5256,6 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft, } #endif /* CONFIG_NUMA */ -static const char * const mem_cgroup_lru_names[] = { - inactive_anon, - active_anon, - inactive_file, - active_file, - unevictable, -}; - static inline void mem_cgroup_lru_names_not_uptodate(void) { BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7e9e911..4b8a6dd 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -421,8 +421,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, cpuset_print_task_mems_allowed(current); task_unlock(current);
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Thu 08-11-12 20:37:45, Sha Zhengju wrote: On 11/08/2012 02:02 AM, David Rientjes wrote: On Wed, 7 Nov 2012, Sha Zhengju wrote: [..] + else + show_mem(SHOW_MEM_FILTER_NODES); Well that's disappointing if memcg == root_mem_cgroup, we'd probably like to know the global memory state to determine what the problem is. I am really wondering if there is any case that can pass root_mem_cgroup down here. No it cannot because the root cgroup doesn't have any limit so we cannot trigger memcg oom killer. -- Michal Hocko SUSE Labs -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 06:17 AM, Michal Hocko wrote: On Wed 07-11-12 16:41:36, Sha Zhengju wrote: From: Sha Zhengjuhandai@taobao.com Current, when a memcg oom is happening the oom dump messages is still global state and provides few useful info for users. This patch prints more pointed memcg page statistics for memcg-oom. Signed-off-by: Sha Zhengjuhandai@taobao.com Cc: Michal Hockomho...@suse.cz Cc: KAMEZAWA Hiroyukikamezawa.hir...@jp.fujitsu.com Cc: David Rientjesrient...@google.com Cc: Andrew Mortona...@linux-foundation.org --- mm/memcontrol.c | 71 --- mm/oom_kill.c |6 +++- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c [...] @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(memcg-move_lock, *flags); } +#define K(x) ((x) (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg-use_hierarchy memcg != root_mem_cgroup) { + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + printk(KERN_CONT %s:%ldKB , mem_cgroup_stat_names[i], + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT %s:%lu , mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i NR_LRU_LISTS; i++) + printk(KERN_CONT %s:%luKB , mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; + } else { + + for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT %s:%lldKB , mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += 
mem_cgroup_read_events(mi, i); + printk(KERN_CONT %s:%llu , + mem_cgroup_events_names[i], val); + } + + for (i = 0; i NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT %s:%lluKB , mem_cgroup_lru_names[i], K(val)); + } + } This is just plain ugly. for_each_mem_cgroup_tree is use_hierarchy aware and there is no need for if (use_hierarchy) part. memcg != root_mem_cgroup test doesn't make much sense as well because we call that a global oom killer ;) Yes... bitterly did I repent the patch... The else-part of for_each_mem_cgroup_tree is enough for hierarchy. I'll send a update one later. Sorry for the noise. : ( Thanks, Sha -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On 11/08/2012 05:07 PM, Kamezawa Hiroyuki wrote: (2012/11/07 17:41), Sha Zhengju wrote: From: Sha Zhengju handai@taobao.com Current, when a memcg oom is happening the oom dump messages is still global state and provides few useful info for users. This patch prints more pointed memcg page statistics for memcg-oom. Signed-off-by: Sha Zhengju handai@taobao.com Cc: Michal Hocko mho...@suse.cz Cc: KAMEZAWA Hiroyuki kamezawa.hir...@jp.fujitsu.com Cc: David Rientjes rient...@google.com Cc: Andrew Morton a...@linux-foundation.org --- mm/memcontrol.c | 71 --- mm/oom_kill.c |6 +++- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { pgmajfault, }; +static const char * const mem_cgroup_lru_names[] = { +inactive_anon, +active_anon, +inactive_file, +active_file, +unevictable, +}; + Is this for the same strings with show_free_areas() ? I just move the declaration here from the bottom of source file to make the following use error-free. /* * Per memcg event counter is incremented at every pagein/pageout. With THP, * it will be incremated by the number of pages. This counter is used for @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(memcg-move_lock, *flags); } +#define K(x) ((x) (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ +struct mem_cgroup *mi; +unsigned int i; + +if (!memcg-use_hierarchy memcg != root_mem_cgroup) { Why do you need to have this condition check ? Yes, the check is unnecessary... I'll remove it next version. +for (i = 0; i MEM_CGROUP_STAT_NSTATS; i++) { +if (i == MEM_CGROUP_STAT_SWAP !do_swap_account) +continue; +printk(KERN_CONT %s:%ldKB , mem_cgroup_stat_names[i], +K(mem_cgroup_read_stat(memcg, i))); Hm, how about using the same style with show_free_areas() ? 
I'm also trying do so. show_free_areas() prints the memory related info in two style: one is in page unit and the other is in KB (I've no idea why we distinct them), but I think the KB format is more readable. +for (i = 0; i MEM_CGROUP_EVENTS_NSTATS; i++) +printk(KERN_CONT %s:%lu , mem_cgroup_events_names[i], +mem_cgroup_read_events(memcg, i)); + I don't think EVENTS info is useful for oom. It seems you're right. : ) +for (i = 0; i NR_LRU_LISTS; i++) +printk(KERN_CONT %s:%luKB , mem_cgroup_lru_names[i], +K(mem_cgroup_nr_lru_pages(memcg, BIT(i; How far does your new information has different format than usual oom ? Could you show a sample and difference in changelog ? Of course, I prefer both of them has similar format. The new memcg-oom info excludes global state out and prints the memcg statistics instead which seems more brevity. I'll add a sample next time. Thanks for reminding me! Thanks, Sha -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Wed 07-11-12 16:41:36, Sha Zhengju wrote: > From: Sha Zhengju > > Current, when a memcg oom is happening the oom dump messages is still global > state and provides few useful info for users. This patch prints more pointed > memcg page statistics for memcg-oom. > > Signed-off-by: Sha Zhengju > Cc: Michal Hocko > Cc: KAMEZAWA Hiroyuki > Cc: David Rientjes > Cc: Andrew Morton > --- > mm/memcontrol.c | 71 > --- > mm/oom_kill.c |6 +++- > 2 files changed, 66 insertions(+), 11 deletions(-) > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index 0eab7d5..2df5e72 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c [...] > @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup > *memcg, > spin_unlock_irqrestore(>move_lock, *flags); > } > > +#define K(x) ((x) << (PAGE_SHIFT-10)) > +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) > +{ > + struct mem_cgroup *mi; > + unsigned int i; > + > + if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { > + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], > + K(mem_cgroup_read_stat(memcg, i))); > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) > + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], > + mem_cgroup_read_events(memcg, i)); > + > + for (i = 0; i < NR_LRU_LISTS; i++) > + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], > + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; > + } else { > + > + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + long long val = 0; > + > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_read_stat(mi, i); > + printk(KERN_CONT "%s:%lldKB ", > mem_cgroup_stat_names[i], K(val)); > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += 
mem_cgroup_read_events(mi, i); > + printk(KERN_CONT "%s:%llu ", > + mem_cgroup_events_names[i], val); > + } > + > + for (i = 0; i < NR_LRU_LISTS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); > + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], > K(val)); > + } > + } This is just plain ugly. for_each_mem_cgroup_tree is use_hierarchy aware and there is no need for if (use_hierarchy) part. memcg != root_mem_cgroup test doesn't make much sense as well because we call that a global oom killer ;) -- Michal Hocko SUSE Labs -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Wed, 7 Nov 2012, Sha Zhengju wrote: > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index 0eab7d5..2df5e72 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { > "pgmajfault", > }; > > +static const char * const mem_cgroup_lru_names[] = { > + "inactive_anon", > + "active_anon", > + "inactive_file", > + "active_file", > + "unevictable", > +}; > + > /* > * Per memcg event counter is incremented at every pagein/pageout. With THP, > * it will be incremated by the number of pages. This counter is used for > @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup > *memcg, > spin_unlock_irqrestore(>move_lock, *flags); > } > > +#define K(x) ((x) << (PAGE_SHIFT-10)) > +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) > +{ > + struct mem_cgroup *mi; > + unsigned int i; > + > + if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { > + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], This printk isn't continuing any previous printk, so using KERN_CONT here will require a short header to be printed first ("Memcg: "?) with KERN_INFO before the iterations. > + K(mem_cgroup_read_stat(memcg, i))); > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) > + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], > + mem_cgroup_read_events(memcg, i)); > + > + for (i = 0; i < NR_LRU_LISTS; i++) > + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], > + K(mem_cgroup_nr_lru_pages(memcg, BIT(i; > + } else { > + Spurious newline. Eek, is there really no way to avoid this if-conditional and just use for_each_mem_cgroup_tree() for everything and use mem_cgroup_iter_break(memcg, iter); break; for !memcg->use_hierarchy? 
> + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { > + long long val = 0; > + > + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) > + continue; > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_read_stat(mi, i); > + printk(KERN_CONT "%s:%lldKB ", > mem_cgroup_stat_names[i], K(val)); > + } > + > + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_read_events(mi, i); > + printk(KERN_CONT "%s:%llu ", > + mem_cgroup_events_names[i], val); > + } > + > + for (i = 0; i < NR_LRU_LISTS; i++) { > + unsigned long long val = 0; > + > + for_each_mem_cgroup_tree(mi, memcg) > + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); > + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], > K(val)); > + } > + } > + printk(KERN_CONT "\n"); > +} > /** > - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in > read mode. > * @memcg: The memory cgroup that went over limit > * @p: Task that is going to be killed > * > @@ -1569,6 +1628,8 @@ done: > res_counter_read_u64(>kmem, RES_USAGE) >> 10, > res_counter_read_u64(>kmem, RES_LIMIT) >> 10, > res_counter_read_u64(>kmem, RES_FAILCNT)); > + > + mem_cgroup_print_oom_stat(memcg); I think this should be folded into mem_cgroup_print_oom_info(), I don't see a need for a new function. 
> } > > /* > @@ -5195,14 +5256,6 @@ static int memcg_numa_stat_show(struct cgroup *cont, > struct cftype *cft, > } > #endif /* CONFIG_NUMA */ > > -static const char * const mem_cgroup_lru_names[] = { > - "inactive_anon", > - "active_anon", > - "inactive_file", > - "active_file", > - "unevictable", > -}; > - > static inline void mem_cgroup_lru_names_not_uptodate(void) > { > BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); > diff --git a/mm/oom_kill.c b/mm/oom_kill.c > index 7e9e911..4b8a6dd 100644 > --- a/mm/oom_kill.c > +++ b/mm/oom_kill.c > @@ -421,8 +421,10 @@ static void dump_header(struct task_struct *p, gfp_t > gfp_mask, int order, > cpuset_print_task_mems_allowed(current); > task_unlock(current); > dump_stack(); > - mem_cgroup_print_oom_info(memcg, p); > - show_mem(SHOW_MEM_FILTER_NODES); > + if (memcg) > + mem_cgroup_print_oom_info(memcg, p);
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Wed, 7 Nov 2012, Sha Zhengju wrote: diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -118,6 +118,14 @@ static const char * const mem_cgroup_events_names[] = { "pgmajfault", }; +static const char * const mem_cgroup_lru_names[] = { + "inactive_anon", + "active_anon", + "inactive_file", + "active_file", + "unevictable", +}; + /* * Per memcg event counter is incremented at every pagein/pageout. With THP, * it will be incremated by the number of pages. This counter is used for @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(&memcg->move_lock, *flags); } +#define K(x) ((x) << (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) + continue; + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], This printk isn't continuing any previous printk, so using KERN_CONT here will require a short header to be printed first ("Memcg: "?) with KERN_INFO before the iterations. + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i < NR_LRU_LISTS; i++) + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i)))); + } else { + Spurious newline. Eek, is there really no way to avoid this if-conditional and just use for_each_mem_cgroup_tree() for everything and use mem_cgroup_iter_break(memcg, iter); break; for !memcg->use_hierarchy? 
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT "%s:%lldKB ", mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + printk(KERN_CONT "%s:%llu ", + mem_cgroup_events_names[i], val); + } + + for (i = 0; i < NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], K(val)); + } + } + printk(KERN_CONT "\n"); +} /** - * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. * @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1569,6 +1628,8 @@ done: res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10, res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10, res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); + + mem_cgroup_print_oom_stat(memcg); I think this should be folded into mem_cgroup_print_oom_info(), I don't see a need for a new function. 
} /* @@ -5195,14 +5256,6 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft, } #endif /* CONFIG_NUMA */ -static const char * const mem_cgroup_lru_names[] = { - "inactive_anon", - "active_anon", - "inactive_file", - "active_file", - "unevictable", -}; - static inline void mem_cgroup_lru_names_not_uptodate(void) { BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7e9e911..4b8a6dd 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -421,8 +421,10 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, cpuset_print_task_mems_allowed(current); task_unlock(current); dump_stack(); - mem_cgroup_print_oom_info(memcg, p); - show_mem(SHOW_MEM_FILTER_NODES); + if (memcg) + mem_cgroup_print_oom_info(memcg, p); mem_cgroup_print_oom_info() already returns immediately for !memcg, so I'm not sure why this change is made. + else +
Re: [PATCH 1/2] memcg, oom: provide more precise dump info while memcg oom happening
On Wed 07-11-12 16:41:36, Sha Zhengju wrote: From: Sha Zhengju handai@taobao.com Current, when a memcg oom is happening the oom dump messages is still global state and provides few useful info for users. This patch prints more pointed memcg page statistics for memcg-oom. Signed-off-by: Sha Zhengju handai@taobao.com Cc: Michal Hocko mho...@suse.cz Cc: KAMEZAWA Hiroyuki kamezawa.hir...@jp.fujitsu.com Cc: David Rientjes rient...@google.com Cc: Andrew Morton a...@linux-foundation.org --- mm/memcontrol.c | 71 --- mm/oom_kill.c |6 +++- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0eab7d5..2df5e72 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c [...] @@ -1501,8 +1509,59 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, spin_unlock_irqrestore(&memcg->move_lock, *flags); } +#define K(x) ((x) << (PAGE_SHIFT-10)) +static void mem_cgroup_print_oom_stat(struct mem_cgroup *memcg) +{ + struct mem_cgroup *mi; + unsigned int i; + + if (!memcg->use_hierarchy && memcg != root_mem_cgroup) { + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) + continue; + printk(KERN_CONT "%s:%ldKB ", mem_cgroup_stat_names[i], + K(mem_cgroup_read_stat(memcg, i))); + } + + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) + printk(KERN_CONT "%s:%lu ", mem_cgroup_events_names[i], + mem_cgroup_read_events(memcg, i)); + + for (i = 0; i < NR_LRU_LISTS; i++) + printk(KERN_CONT "%s:%luKB ", mem_cgroup_lru_names[i], + K(mem_cgroup_nr_lru_pages(memcg, BIT(i)))); + } else { + + for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { + long long val = 0; + + if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) + continue; + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_stat(mi, i); + printk(KERN_CONT "%s:%lldKB ", mem_cgroup_stat_names[i], K(val)); + } + + for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_read_events(mi, i); + 
printk(KERN_CONT "%s:%llu ", + mem_cgroup_events_names[i], val); + } + + for (i = 0; i < NR_LRU_LISTS; i++) { + unsigned long long val = 0; + + for_each_mem_cgroup_tree(mi, memcg) + val += mem_cgroup_nr_lru_pages(mi, BIT(i)); + printk(KERN_CONT "%s:%lluKB ", mem_cgroup_lru_names[i], K(val)); + } + } This is just plain ugly. for_each_mem_cgroup_tree is use_hierarchy aware and there is no need for if (use_hierarchy) part. memcg != root_mem_cgroup test doesn't make much sense as well because we call that a global oom killer ;) -- Michal Hocko SUSE Labs -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/