This uses the new annotation to determine whether an mm has mmu notifiers with
blockable invalidate range callbacks; if it does, oom reaping is avoided.
Otherwise, mmu_notifier_invalidate_range_{start,end}() are called around
unmap_page_range().

Signed-off-by: David Rientjes <rient...@google.com>
---
 mm/oom_kill.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
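
For context, mm_has_blockable_invalidate_notifiers() is introduced by the
mmu_notifier annotation patch earlier in this series.  A rough sketch of the
check it performs is included below; the MMU_INVALIDATE_DOES_NOT_BLOCK flag
and the SRCU-protected walk of mm->mmu_notifier_mm->list are assumptions
drawn from that companion patch, not something this patch adds:

/*
 * Sketch (assumed from the companion mmu_notifier patch): return true if any
 * registered notifier has an invalidate_range_start() callback that has not
 * been annotated as non-blocking.
 */
bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	bool ret = false;
	int id;

	if (!mm_has_notifiers(mm))
		return false;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start &&
		    !(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK))
			ret = true;
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

With such a helper, the reaper only backs off and retries when a potentially
blocking notifier is registered; mms whose notifiers are all annotated as
non-blocking can be reaped with the invalidate_range_{start,end}() calls
wrapped around unmap_page_range().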

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -514,15 +514,12 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
        }
 
        /*
-        * If the mm has notifiers then we would need to invalidate them around
-        * unmap_page_range and that is risky because notifiers can sleep and
-        * what they do is basically undeterministic.  So let's have a short
+        * If the mm has invalidate_{start,end}() notifiers that could block,
         * sleep to give the oom victim some more time.
         * TODO: we really want to get rid of this ugly hack and make sure that
-        * notifiers cannot block for unbounded amount of time and add
-        * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+        * notifiers cannot block for unbounded amount of time
         */
-       if (mm_has_notifiers(mm)) {
+       if (mm_has_blockable_invalidate_notifiers(mm)) {
                up_read(&mm->mmap_sem);
                schedule_timeout_idle(HZ);
                goto unlock_oom;
@@ -565,10 +562,14 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * count elevated without a good reason.
                 */
                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
-                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
-                       unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
-                                        NULL);
-                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+                       const unsigned long start = vma->vm_start;
+                       const unsigned long end = vma->vm_end;
+
+                       tlb_gather_mmu(&tlb, mm, start, end);
+                       mmu_notifier_invalidate_range_start(mm, start, end);
+                       unmap_page_range(&tlb, vma, start, end, NULL);
+                       mmu_notifier_invalidate_range_end(mm, start, end);
+                       tlb_finish_mmu(&tlb, start, end);
                }
        }
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
