Currently the memory error handler handles action-optional errors in a deferred
manner by default. And if a recovery-aware application wants to handle them
immediately, it can do so by setting the PF_MCE_EARLY flag. However, such a
signal can be sent only to the main thread, so it's problematic if the
application wants to have a dedicated thread to handle such signals.

So this patch adds dedicated thread support to the memory error handler. We
have a PF_MCE_EARLY flag for each thread separately, so with this patch the AO
signal is sent to the thread with the PF_MCE_EARLY flag set, not to the main
thread. If you want to implement a dedicated thread, you call prctl() to set
PF_MCE_EARLY on that thread.

Memory error handler collects processes to be killed, so this patch lets it
check PF_MCE_EARLY flag on each thread in the collecting routines.

No behavioral change for all non-early kill cases.

ChangeLog:
- document more specifically
- add parenthesis in find_early_kill_thread()
- move position of find_early_kill_thread() and task_early_kill()

Signed-off-by: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Reviewed-by: Tony Luck <tony.l...@intel.com>
Cc: Kamil Iskra <is...@mcs.anl.gov>
Cc: Andi Kleen <a...@firstfloor.org>
Cc: Borislav Petkov <b...@suse.de>
Cc: Chen Gong <gong.c...@linux.jf.intel.com>
---
 Documentation/vm/hwpoison.txt |  5 ++++
 mm/memory-failure.c           | 58 ++++++++++++++++++++++++++++++++-----------
 2 files changed, 48 insertions(+), 15 deletions(-)

diff --git mmotm-2014-05-21-16-57.orig/Documentation/vm/hwpoison.txt 
mmotm-2014-05-21-16-57/Documentation/vm/hwpoison.txt
index 550068466605..6ae89a9edf2a 100644
--- mmotm-2014-05-21-16-57.orig/Documentation/vm/hwpoison.txt
+++ mmotm-2014-05-21-16-57/Documentation/vm/hwpoison.txt
@@ -84,6 +84,11 @@ PR_MCE_KILL
                PR_MCE_KILL_EARLY: Early kill
                PR_MCE_KILL_LATE:  Late kill
                PR_MCE_KILL_DEFAULT: Use system global default
+       Note that if you want to have a dedicated thread which handles
+       the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
+       call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
+       the SIGBUS is sent to the main thread.
+
 PR_MCE_KILL_GET
        return current mode
 
diff --git mmotm-2014-05-21-16-57.orig/mm/memory-failure.c 
mmotm-2014-05-21-16-57/mm/memory-failure.c
index fbcdb1d54c55..9751e19ab13b 100644
--- mmotm-2014-05-21-16-57.orig/mm/memory-failure.c
+++ mmotm-2014-05-21-16-57/mm/memory-failure.c
@@ -380,15 +380,44 @@ static void kill_procs(struct list_head *to_kill, int 
forcekill, int trapno,
        }
 }
 
-static int task_early_kill(struct task_struct *tsk, int force_early)
+/*
+ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
+ * on behalf of the thread group. Return task_struct of the (first found)
+ * dedicated thread if found, and return NULL otherwise.
+ */
+static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
+{
+       struct task_struct *t;
+       rcu_read_lock();
+       for_each_thread(tsk, t)
+               if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
+                       goto found;
+       t = NULL;
+found:
+       rcu_read_unlock();
+       return t;
+}
+
+/*
+ * Determine whether a given process is "early kill" process which expects
+ * to be signaled when some page under the process is hwpoisoned.
+ * Return task_struct of the dedicated thread (main thread unless explicitly
+ * specified) if the process is "early kill," and otherwise returns NULL.
+ */
+static struct task_struct *task_early_kill(struct task_struct *tsk,
+                                          int force_early)
 {
+       struct task_struct *t;
        if (!tsk->mm)
-               return 0;
+               return NULL;
        if (force_early)
-               return 1;
-       if (tsk->flags & PF_MCE_PROCESS)
-               return !!(tsk->flags & PF_MCE_EARLY);
-       return sysctl_memory_failure_early_kill;
+               return tsk;
+       t = find_early_kill_thread(tsk);
+       if (t)
+               return t;
+       if (sysctl_memory_failure_early_kill)
+               return tsk;
+       return NULL;
 }
 
 /*
@@ -410,16 +439,16 @@ static void collect_procs_anon(struct page *page, struct 
list_head *to_kill,
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
-
-               if (!task_early_kill(tsk, force_early))
+               struct task_struct *t = task_early_kill(tsk, force_early);
+               if (!t)
                        continue;
                anon_vma_interval_tree_foreach(vmac, &av->rb_root,
                                               pgoff, pgoff) {
                        vma = vmac->vma;
                        if (!page_mapped_in_vma(page, vma))
                                continue;
-                       if (vma->vm_mm == tsk->mm)
-                               add_to_kill(tsk, page, vma, to_kill, tkc);
+                       if (vma->vm_mm == t->mm)
+                               add_to_kill(t, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
@@ -440,10 +469,9 @@ static void collect_procs_file(struct page *page, struct 
list_head *to_kill,
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page_pgoff(page);
-
-               if (!task_early_kill(tsk, force_early))
+               struct task_struct *t = task_early_kill(tsk, force_early);
+               if (!t)
                        continue;
-
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
                                      pgoff) {
                        /*
@@ -453,8 +481,8 @@ static void collect_procs_file(struct page *page, struct 
list_head *to_kill,
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
-                       if (vma->vm_mm == tsk->mm)
-                               add_to_kill(tsk, page, vma, to_kill, tkc);
+                       if (vma->vm_mm == t->mm)
+                               add_to_kill(t, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
-- 
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to