mmu_notifier_invalidate_page() is now called from under the page table spinlock, where callbacks are not allowed to sleep. Add a call to mmu_notifier_invalidate_range(), issued after the rmap walk has dropped the lock, for users that need to be able to sleep.
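As a minimal sketch of what this enables, consider a hypothetical notifier user (all example_* names below are illustrative, not part of this patch): the sleepable work moves out of the ->invalidate_page() callback and into ->invalidate_range(), which the hunks below invoke only after the page table lock has been released.

#include <linux/mmu_notifier.h>

/* Hypothetical notifier user; every example_* name is made up. */
static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	/*
	 * With this patch the rmap paths issue this callout after the
	 * page table spinlock is dropped, with [start, end) covering
	 * the invalidated pages (end is exclusive). Work that may
	 * sleep, such as flushing a remote TLB across a slow bus, can
	 * therefore be done here instead of in ->invalidate_page().
	 */
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range = example_invalidate_range,
};

static struct mmu_notifier example_mn = {
	.ops = &example_mn_ops,
};

/* Registered once against an mm: mmu_notifier_register(&example_mn, mm); */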
Relevant threads:

https://lkml.kernel.org/r/[email protected]
https://lkml.kernel.org/r/[email protected]
https://marc.info/?l=kvm&m=150327081325160&w=2

Signed-off-by: Jérôme Glisse <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Bernhard Held <[email protected]>
Cc: Adam Borowski <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Takashi Iwai <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: axie <[email protected]>
Cc: Andrew Morton <[email protected]>
---
 mm/rmap.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index c8993c63eb25..06792e28093c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -887,6 +887,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
+	unsigned long start = address, end = address;
+	bool invalidate = false;
 	int *cleaned = arg;
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -927,10 +929,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
 		if (ret) {
 			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			/* range is exclusive */
+			end = address + PAGE_SIZE;
+			invalidate = true;
 			(*cleaned)++;
 		}
 	}
 
+	if (invalidate)
+		mmu_notifier_invalidate_range(vma->vm_mm, start, end);
+
 	return true;
 }
 
@@ -1323,8 +1331,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	};
 	pte_t pteval;
 	struct page *subpage;
-	bool ret = true;
+	bool ret = true, invalidate = false;
 	enum ttu_flags flags = (enum ttu_flags)arg;
+	unsigned long start = address, end = address;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
@@ -1491,7 +1500,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
 		mmu_notifier_invalidate_page(mm, address);
+		/* range is exclusive */
+		end = address + PAGE_SIZE;
+		invalidate = true;
 	}
+
+	if (invalidate)
+		mmu_notifier_invalidate_range(vma->vm_mm, start, end);
+
 	return ret;
 }
-- 
2.13.5

