dump_tasks() traverses all the existing processes even for the memcg OOM
context which is not only unnecessary but also wasteful.  This imposes a
long RCU critical section even from a contained context which can be quite
disruptive.

Change dump_tasks() to be aligned with select_bad_process() and use
mem_cgroup_scan_tasks() to selectively traverse only the processes of the
target memcg hierarchy during a memcg OOM.

Signed-off-by: Shakeel Butt <shake...@google.com>
Acked-by: Michal Hocko <mho...@suse.com>
Acked-by: Roman Gushchin <g...@fb.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Tetsuo Handa <penguin-ker...@i-love.sakura.ne.jp>
Cc: Vladimir Davydov <vdavydov....@gmail.com>
Cc: David Rientjes <rient...@google.com>
Cc: KOSAKI Motohiro <kosaki.motoh...@jp.fujitsu.com>
Cc: Paul Jackson <p...@sgi.com>
Cc: Nick Piggin <npig...@gmail.com>
Cc: Andrew Morton <a...@linux-foundation.org>
---
Changelog since v3:
- None

Changelog since v2:
- Updated the commit message.

Changelog since v1:
- Divide the patch into two patches.

 mm/oom_kill.c | 68 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 40 insertions(+), 28 deletions(-)

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 085abc91024d..a940d2aa92d6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -380,10 +380,38 @@ static void select_bad_process(struct oom_control *oc)
        }
 }
 
+static int dump_task(struct task_struct *p, void *arg)
+{
+       struct oom_control *oc = arg;
+       struct task_struct *task;
+
+       if (oom_unkillable_task(p, NULL, oc->nodemask))
+               return 0;
+
+       task = find_lock_task_mm(p);
+       if (!task) {
+               /*
+                * This is a kthread or all of p's threads have already
+                * detached their mm's.  There's no need to report
+                * them; they can't be oom killed anyway.
+                */
+               return 0;
+       }
+
+       pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
+               task->pid, from_kuid(&init_user_ns, task_uid(task)),
+               task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+               mm_pgtables_bytes(task->mm),
+               get_mm_counter(task->mm, MM_SWAPENTS),
+               task->signal->oom_score_adj, task->comm);
+       task_unlock(task);
+
+       return 0;
+}
+
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @memcg: current's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
@@ -391,37 +419,21 @@ static void select_bad_process(struct oom_control *oc)
  * State information includes task's pid, uid, tgid, vm size, rss,
  * pgtables_bytes, swapents, oom_score_adj value, and name.
  */
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct oom_control *oc)
 {
-       struct task_struct *p;
-       struct task_struct *task;
-
        pr_info("Tasks state (memory values in pages):\n");
        pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
-       rcu_read_lock();
-       for_each_process(p) {
-               if (oom_unkillable_task(p, memcg, nodemask))
-                       continue;
 
-               task = find_lock_task_mm(p);
-               if (!task) {
-                       /*
-                        * This is a kthread or all of p's threads have already
-                        * detached their mm's.  There's no need to report
-                        * them; they can't be oom killed anyway.
-                        */
-                       continue;
-               }
+       if (is_memcg_oom(oc))
+               mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+       else {
+               struct task_struct *p;
 
-               pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
-                       task->pid, from_kuid(&init_user_ns, task_uid(task)),
-                       task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-                       mm_pgtables_bytes(task->mm),
-                       get_mm_counter(task->mm, MM_SWAPENTS),
-                       task->signal->oom_score_adj, task->comm);
-               task_unlock(task);
+               rcu_read_lock();
+               for_each_process(p)
+                       dump_task(p, oc);
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
 }
 
 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
@@ -453,7 +465,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
                        dump_unreclaimable_slab();
        }
        if (sysctl_oom_dump_tasks)
-               dump_tasks(oc->memcg, oc->nodemask);
+               dump_tasks(oc);
        if (p)
                dump_oom_summary(oc, p);
 }
-- 
2.22.0.410.gd8fdbe21b5-goog

Reply via email to