From: Nadav Amit <na...@vmware.com>

Currently, using mprotect() or userfaultfd to unprotect a memory region
causes a TLB flush. At least on x86, when protection is promoted, no
TLB flush is needed.

Add an arch-specific pte_may_need_flush() which tells whether a TLB
flush is needed based on the old PTE and the new one. Implement an x86
pte_may_need_flush().
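
For illustration, the intended calling pattern (mirroring the
mprotect change below) is to compare the PTE before and after the
modification and flush only when a stale TLB entry could be harmful;
architectures without their own implementation keep flushing
unconditionally:

    oldpte = ptep_modify_prot_start(vma, addr, pte);
    ptent = pte_modify(oldpte, newprot);
    ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
    if (pte_may_need_flush(oldpte, ptent))
            tlb_flush_pte_range(tlb, addr, PAGE_SIZE);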

For x86, besides the simple logic that PTE protection promotion or
changes of software bits do not require a flush, also add logic that
considers the dirty-bit. If the dirty-bit is clear and write-protect is
set, no TLB flush is needed, as x86 updates the dirty-bit atomically
on write, and if the bit is clear, the PTE is reread.
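
The following self-contained userspace sketch (not part of this patch;
the mask values match x86's PTE bit layout, but the helper name and
structure are illustrative only) demonstrates the write-protect case of
the dirty-bit check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PAGE_PRESENT (1ULL << 0)
    #define X86_PAGE_RW      (1ULL << 1)
    #define X86_PAGE_DIRTY   (1ULL << 6)

    /*
     * Illustrative helper: the flush can be skipped when only RW/dirty
     * changed and the old PTE was clean or already write-protected.
     */
    static bool may_skip_flush(uint64_t old, uint64_t new)
    {
            uint64_t diff = old ^ new;

            return !(diff & ~(X86_PAGE_RW | X86_PAGE_DIRTY)) &&
                   (!(old & X86_PAGE_DIRTY) || !(old & X86_PAGE_RW));
    }

    int main(void)
    {
            uint64_t clean = X86_PAGE_PRESENT | X86_PAGE_RW;
            uint64_t dirty = clean | X86_PAGE_DIRTY;

            /* Clean PTE: the CPU rereads the PTE to set the dirty bit,
             * so a stale writable TLB entry cannot bypass the new
             * write-protection; prints 1 (flush may be skipped). */
            printf("%d\n", may_skip_flush(clean, clean & ~X86_PAGE_RW));

            /* Dirty PTE: a writable translation may still be cached;
             * prints 0 (must flush). */
            printf("%d\n", may_skip_flush(dirty, dirty & ~X86_PAGE_RW));
            return 0;
    }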

Signed-off-by: Nadav Amit <na...@vmware.com>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <w...@kernel.org>
Cc: Yu Zhao <yuz...@google.com>
Cc: Nick Piggin <npig...@gmail.com>
Cc: x...@kernel.org
---
 arch/x86/include/asm/tlbflush.h | 44 +++++++++++++++++++++++++++++++++
 include/asm-generic/tlb.h       |  4 +++
 mm/mprotect.c                   |  3 ++-
 3 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 8c87a2e0b660..a617dc0a9b06 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -255,6 +255,50 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
+static inline bool pte_may_need_flush(pte_t oldpte, pte_t newpte)
+{
+       const pteval_t ignore_mask = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
+                                    _PAGE_SOFTW3 | _PAGE_ACCESSED;
+       const pteval_t enable_mask = _PAGE_RW | _PAGE_DIRTY | _PAGE_GLOBAL;
+       pteval_t oldval = pte_val(oldpte);
+       pteval_t newval = pte_val(newpte);
+       pteval_t diff = oldval ^ newval;
+       pteval_t disable_mask = 0;
+
+       if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
+               disable_mask = _PAGE_NX;
+
+       /* new is non-present: need only if old is present */
+       if (pte_none(newpte))
+               return !pte_none(oldpte);
+
+       /*
+        * If, excluding the ignored bits, only RW and dirty are cleared and the
+        * old PTE does not have the dirty-bit set, we can avoid a flush. This
+        * is possible since the x86 architecture sets the dirty bit atomically
+        * while it caches the PTE in the TLB.
+        *
+        * The condition treats any change to RW and dirty as not requiring a
+        * flush if the old PTE is not dirty or not writable, both to simplify
+        * the code and to cover (unlikely) cases of changing the dirty-bit of
+        * a write-protected PTE.
+        */
+       if (!(diff & ~(_PAGE_RW | _PAGE_DIRTY | ignore_mask)) &&
+           (!pte_dirty(oldpte) || !pte_write(oldpte)))
+               return false;
+
+       /*
+        * Any change of the PFN, or of any flag other than those we explicitly
+        * handle, requires a flush (e.g., PAT, protection keys). To save
+        * flushes we do not consider the accessed bit, as the kernel treats
+        * it as best-effort anyway.
+        */
+       return diff & ((oldval & enable_mask) |
+                      (newval & disable_mask) |
+                      ~(enable_mask | disable_mask | ignore_mask));
+}
+#define pte_may_need_flush pte_may_need_flush
+
 #endif /* !MODULE */
 
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index eea113323468..c2deec0b6919 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -654,6 +654,10 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
        } while (0)
 #endif
 
+#ifndef pte_may_need_flush
+static inline bool pte_may_need_flush(pte_t oldpte, pte_t newpte) { return true; }
+#endif
+
 #endif /* CONFIG_MMU */
 
 #endif /* _ASM_GENERIC__TLB_H */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 632d5a677d3f..b7473d2c9a1f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -139,7 +139,8 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
-                       tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
+                       if (pte_may_need_flush(oldpte, ptent))
+                               tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
                        pages++;
                } else if (is_swap_pte(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
-- 
2.25.1
