Shooting down lazy TLB references to an mm at exit_mmap time ensures
that no CPU in the system is still using the mm_struct as a lazy TLB mm,
so it can be torn down and freed immediately instead of lingering until
the last lazy TLB user drops its reference.

Signed-off-by: Rik van Riel <r...@surriel.com>
Suggested-by: Andy Lutomirski <l...@kernel.org>
Suggested-by: Peter Zijlstra <pet...@infradead.org>
---
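A note for reviewers: the shootdown added below relies on leave_mm()
running on every CPU that still has this mm loaded in lazy TLB mode.
Roughly, and purely as an illustration (the helper name below is made
up; the real work is done by leave_mm() in arch/x86/mm/tlb.c), each
IPI'd CPU does the equivalent of:

	/*
	 * Illustrative sketch only, not the in-tree implementation:
	 * drop this CPU's lazy TLB reference to the exiting mm.
	 */
	static void drop_lazy_reference(void *info)
	{
		struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

		/* Nothing to do if we are already running on init_mm. */
		if (loaded_mm == &init_mm)
			return;

		/*
		 * Switching to init_mm clears this CPU's bit in
		 * mm_cpumask(loaded_mm) and stops this CPU from walking
		 * the exiting mm's page tables.
		 */
		switch_mm(NULL, &init_mm, NULL);
	}

Once no CPU has the mm loaded, exit_mmap() can free the mm_struct
without waiting for lazy TLB users to drop their references.
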
 arch/x86/Kconfig                   |  1 +
 arch/x86/include/asm/mmu_context.h |  1 +
 arch/x86/include/asm/tlbflush.h    |  2 ++
 arch/x86/mm/tlb.c                  | 15 +++++++++++++++
 4 files changed, 19 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6d4774f203d0..ecdfc6933203 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,6 +75,7 @@ config X86
        select ARCH_MIGHT_HAVE_ACPI_PDC         if ACPI
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_NO_ACTIVE_MM_REFCOUNTING
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
        select ARCH_USE_BUILTIN_BSWAP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb9289c764..529bf7bc5f75 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -238,6 +238,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 {
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
+       lazy_tlb_exit_mmap(mm);
 }
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 511bf5fae8b8..3966a45367cd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -538,6 +538,8 @@ extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
        native_flush_tlb_others(mask, info)
 #endif
 
+extern void lazy_tlb_exit_mmap(struct mm_struct *mm);
+
 extern void tlb_flush_remove_tables(struct mm_struct *mm);
 extern void tlb_flush_remove_tables_local(void *arg);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ea4ef5ceaba2..7b1add904396 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -713,6 +713,21 @@ void tlb_flush_remove_tables(struct mm_struct *mm)
        put_cpu();
 }
 
+/*
+ * At exit or execve time, all other threads of a process have disappeared,
+ * but other CPUs could still be referencing this mm in lazy TLB mode.
+ * Get rid of those references before releasing the mm.
+ */
+void lazy_tlb_exit_mmap(struct mm_struct *mm)
+{
+       int cpu = get_cpu();
+
+       if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
+               on_each_cpu_mask(mm_cpumask(mm), leave_mm, NULL, 1);
+
+       put_cpu();
+}
+
 static void do_flush_tlb_all(void *info)
 {
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-- 
2.14.4
