From: Will Deacon <w...@kernel.org>

Since commit 0758cd830494 ("asm-generic/tlb: avoid potential double
flush"), TLB invalidation is elided in tlb_finish_mmu() if no entries
were batched via the tlb_remove_*() functions. Consequently, the
page-table modifications performed by clear_refs_write() in response to
a write to /proc/<pid>/clear_refs are no longer followed by TLB
invalidation.
Although this is fine when simply aging the ptes, in the case of
clearing the "soft-dirty" state we can end up with entries where
pte_write() is false, yet a writable mapping remains in the TLB.
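
For background, here is a minimal userspace sketch of the interface
involved (illustrative only, not part of this patch; assumes
CONFIG_MEM_SOFT_DIRTY and may need suitable privileges to read
pagemap): writing "4" to /proc/<pid>/clear_refs clears the soft-dirty
bits, which are reported back as bit 55 of each /proc/<pid>/pagemap
entry.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int soft_dirty(void *addr)
{
	uint64_t ent = 0;
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	pread(fd, &ent, sizeof(ent),
	      (uintptr_t)addr / psz * sizeof(ent));
	close(fd);
	return (int)((ent >> 55) & 1);	/* bit 55: soft-dirty */
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *page = aligned_alloc(psz, psz);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (!page || fd < 0)
		return 1;
	page[0] = 1;			/* fault the page in, dirtying it */
	write(fd, "4", 1);		/* "4" clears soft-dirty state */
	printf("after clear: %d\n", soft_dirty(page));	/* expect 0 */
	page[0] = 2;			/* write through the mapping again */
	printf("after write: %d\n", soft_dirty(page));	/* expect 1 */
	close(fd);
	return 0;
}

On an affected kernel, the second check can report 0: the stale
writable TLB entry lets the store through without the write-protect
fault that would normally set the soft-dirty bit again, so dirty
tracking silently misses the write.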

Fix this by avoiding the mmu_gather API altogether: instead, manage
both the 'tlb_flush_pending' flag on the 'mm_struct' and the explicit
TLB invalidation for the soft-dirty path, much like mprotect() does
already.
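
For reference, the mprotect()-style pattern the fix follows is roughly
the below (a sketch of the idea, not the patch itself; the real hunks
are further down). inc_tlb_flush_pending() pairs with
mm_tlb_flush_pending() so that concurrent pte readers know a flush is
outstanding:

	inc_tlb_flush_pending(mm);	/* publish that a flush is pending */
	/* ... walk the address space, write-protecting ptes ... */
	flush_tlb_mm(mm);		/* shoot down stale writable TLB entries */
	dec_tlb_flush_pending(mm);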

Fixes: 0758cd830494 ("asm-generic/tlb: avoid potential double flush")
Signed-off-by: Will Deacon <w...@kernel.org>
Signed-off-by: Andrea Arcangeli <aarca...@redhat.com>
---
 fs/proc/task_mmu.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee5a235b3056..a127262ba517 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1189,7 +1189,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
-       struct mmu_gather tlb;
        int itype;
        int rv;
 
@@ -1234,7 +1233,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        count = -EINTR;
                        goto out_mm;
                }
-               tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1252,15 +1250,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                break;
                        }
 
+                       inc_tlb_flush_pending(mm);
                        mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
                                                0, NULL, mm, 0, -1UL);
                        mmu_notifier_invalidate_range_start(&range);
                }
                walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
                                &cp);
-               if (type == CLEAR_REFS_SOFT_DIRTY)
+               if (type == CLEAR_REFS_SOFT_DIRTY) {
                        mmu_notifier_invalidate_range_end(&range);
-               tlb_finish_mmu(&tlb, 0, -1);
+                       flush_tlb_mm(mm);
+                       dec_tlb_flush_pending(mm);
+               }
                mmap_read_unlock(mm);
 out_mm:
                mmput(mm);
