Re: [PATCH v4 1/2] mm/memory-failure.c clean up around tk pre-allocation

2019-08-06 Thread Dan Williams
Hi Jane, looks good. Checkpatch prompts me to point out a couple more fixups:

This patch is titled:

"mm/memory-failure.c clean up..."

...to match the second patch it should be:

"mm/memory-failure: clean up..."

On Tue, Aug 6, 2019 at 10:26 AM Jane Chu <jane.chu@oracle.com> wrote:
>
> add_to_kill() expects the first 'tk' to be pre-allocated, it makes
> subsequent allocations on need basis, this makes the code a bit
> difficult to read. Move all the allocation internal to add_to_kill()
> and drop the **tk argument.
>
> Signed-off-by: Jane Chu <jane.chu@oracle.com>
> ---
>  mm/memory-failure.c | 40 +++++++++++++---------------------------
>  1 file changed, 13 insertions(+), 27 deletions(-)
>
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index d9cc660..51d5b20 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -304,25 +304,19 @@ static unsigned long dev_pagemap_mapping_shift(struct 
> page *page,
>  /*
>   * Schedule a process for later kill.
>   * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
> - * TBD would GFP_NOIO be enough?
>   */
>  static void add_to_kill(struct task_struct *tsk, struct page *p,
>struct vm_area_struct *vma,
> -  struct list_head *to_kill,
> -  struct to_kill **tkc)
> +  struct list_head *to_kill)
>  {
> struct to_kill *tk;
>
> -   if (*tkc) {
> -   tk = *tkc;
> -   *tkc = NULL;
> -   } else {
> -   tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
> -   if (!tk) {
> -   pr_err("Memory failure: Out of memory while machine 
> check handling\n");
> -   return;
> -   }
> +   tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
> +   if (!tk) {
> +   pr_err("Memory failure: Out of memory while machine check 
> handling\n");
> +   return;
> }

checkpatch points out that this error message can be deleted.
According to the commit that added this check (ebfdc40969f2
"checkpatch: attempt to find unnecessary 'out of memory' messages")
the kernel already prints a message and a backtrace on these events,
so seems like a decent additional cleanup to fold.

With those fixups you can add:

Reviewed-by: Dan Williams <dan.j.williams@intel.com>

...along with Naoya's ack.

I would Cc: Andrew Morton on the v5 posting of these as he's the
upstream path for changes to this file.


[PATCH v4 1/2] mm/memory-failure.c clean up around tk pre-allocation

2019-08-06 Thread Jane Chu
add_to_kill() expects the first 'tk' to be pre-allocated, it makes
subsequent allocations on need basis, this makes the code a bit
difficult to read. Move all the allocation internal to add_to_kill()
and drop the **tk argument.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
 mm/memory-failure.c | 40 +++++++++++++---------------------------
 1 file changed, 13 insertions(+), 27 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d9cc660..51d5b20 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -304,25 +304,19 @@ static unsigned long dev_pagemap_mapping_shift(struct 
page *page,
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
- * TBD would GFP_NOIO be enough?
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
   struct vm_area_struct *vma,
-  struct list_head *to_kill,
-  struct to_kill **tkc)
+  struct list_head *to_kill)
 {
struct to_kill *tk;
 
-   if (*tkc) {
-   tk = *tkc;
-   *tkc = NULL;
-   } else {
-   tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
-   if (!tk) {
-   pr_err("Memory failure: Out of memory while machine 
check handling\n");
-   return;
-   }
+   tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+   if (!tk) {
+   pr_err("Memory failure: Out of memory while machine check 
handling\n");
+   return;
}
+
tk->addr = page_address_in_vma(p, vma);
tk->addr_valid = 1;
if (is_zone_device_page(p))
@@ -341,6 +335,7 @@ static void add_to_kill(struct task_struct *tsk, struct 
page *p,
page_to_pfn(p), tsk->comm);
tk->addr_valid = 0;
}
+
get_task_struct(tsk);
tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
@@ -432,7 +427,7 @@ static struct task_struct *task_early_kill(struct 
task_struct *tsk,
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc, int force_early)
+   int force_early)
 {
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -457,7 +452,7 @@ static void collect_procs_anon(struct page *page, struct 
list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
-   add_to_kill(t, page, vma, to_kill, tkc);
+   add_to_kill(t, page, vma, to_kill);
}
}
	read_unlock(&tasklist_lock);
@@ -468,7 +463,7 @@ static void collect_procs_anon(struct page *page, struct 
list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc, int force_early)
+   int force_early)
 {
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -492,7 +487,7 @@ static void collect_procs_file(struct page *page, struct 
list_head *to_kill,
 * to be informed of all such data corruptions.
 */
if (vma->vm_mm == t->mm)
-   add_to_kill(t, page, vma, to_kill, tkc);
+   add_to_kill(t, page, vma, to_kill);
}
}
	read_unlock(&tasklist_lock);
@@ -501,26 +496,17 @@ static void collect_procs_file(struct page *page, struct 
list_head *to_kill,
 
 /*
  * Collect the processes who have the corrupted page mapped to kill.
- * This is done in two steps for locking reasons.
- * First preallocate one tokill structure outside the spin locks,
- * so that we can kill at least one process reasonably reliable.
  */
 static void collect_procs(struct page *page, struct list_head *tokill,
int force_early)
 {
-   struct to_kill *tk;
-
if (!page->mapping)
return;
 
-   tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
-   if (!tk)
-   return;
if (PageAnon(page))
-		collect_procs_anon(page, tokill, &tk, force_early);
+   collect_procs_anon(page, tokill, force_early);
else
-		collect_procs_file(page, tokill, &tk, force_early);
-   kfree(tk);
+   collect_procs_file(page, tokill, force_early);
 }
 
 static const char *action_name[] = {
-- 
1.8.3.1