On big systems, the mm refcount can become highly contended when doing
a lot of context switching with threaded applications (particularly
switching between the idle thread and an application thread).

Abandoning lazy tlb slows switching down quite a bit in the important
user->idle->user cases, so instead implement a non-refcounted scheme
that causes __mmdrop() to IPI all CPUs in the mm_cpumask and shoot down
any remaining lazy ones.

Shootdown IPIs are of some concern, but they have not been observed to be
a big problem with this scheme (the powerpc implementation generated
314 additional interrupts on a 144 CPU system during a kernel compile).
There are a number of strategies that could be employed to reduce IPIs
if they turn out to be a problem for some workload.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
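Note for reviewers: the _lazy_tlb refcounting variants referred to in the
new Kconfig help text are not part of this patch. A minimal sketch of what
they are assumed to look like is below; the helper names
mmgrab_lazy_tlb()/mmdrop_lazy_tlb() and their exact behaviour are
assumptions here, the point being only that lazy tlb references take a
real mm_count reference when MMU_LAZY_TLB_REFCOUNT is selected and become
no-ops in the shootdown scheme:

	/*
	 * Sketch only, assuming helpers of roughly this shape are
	 * provided elsewhere in the series.
	 */
	static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
	{
		/* Take a real reference only in the refcounted scheme */
		if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
			mmgrab(mm);
	}

	static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
	{
		/* Shootdown scheme: final freeing is handled by __mmdrop() IPIs */
		if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
			mmdrop(mm);
	}
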
 arch/Kconfig  | 13 +++++++++++++
 kernel/fork.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/arch/Kconfig b/arch/Kconfig
index 596bf589d74b..540e43aeefa4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -440,6 +440,19 @@ config MMU_LAZY_TLB
 config MMU_LAZY_TLB_REFCOUNT
        def_bool y
        depends on MMU_LAZY_TLB
+       depends on !MMU_LAZY_TLB_SHOOTDOWN
+
+config MMU_LAZY_TLB_SHOOTDOWN
+       bool
+       depends on MMU_LAZY_TLB
+       help
+         Instead of refcounting the "lazy tlb" mm struct, which can cause
+         contention with multi-threaded apps on large multiprocessor systems,
+         this option causes __mmdrop to IPI all CPUs in the mm_cpumask and
+         switch to init_mm if they were using the to-be-freed mm as the lazy
+         tlb. To implement this, architectures must use _lazy_tlb variants of
+         mm refcounting, and mm_cpumask must include at least all possible
+         CPUs on which the mm might be lazy.
 
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
        bool
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d266388d380..e47312c2b48b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -669,6 +669,54 @@ static void check_mm(struct mm_struct *mm)
 #define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)    (kmem_cache_free(mm_cachep, (mm)))
 
+static void do_shoot_lazy_tlb(void *arg)
+{
+       struct mm_struct *mm = arg;
+
+       if (current->active_mm == mm) {
+               WARN_ON_ONCE(current->mm);
+               current->active_mm = &init_mm;
+               switch_mm(mm, &init_mm, current);
+               exit_lazy_tlb(mm, current);
+       }
+}
+
+static void do_check_lazy_tlb(void *arg)
+{
+       struct mm_struct *mm = arg;
+
+       WARN_ON_ONCE(current->active_mm == mm);
+}
+
+static void shoot_lazy_tlbs(struct mm_struct *mm)
+{
+       if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
+               /*
+                * IPI overheads have not been found to be expensive, but they could
+                * be reduced in a number of possible ways, for example (in
+                * roughly increasing order of complexity):
+                * - A batch of mms requiring IPIs could be gathered and freed
+                *   at once.
+                * - CPUs could store their active mm somewhere that can be
+                *   remotely checked without a lock, to filter out
+                *   false-positives in the cpumask.
+                * - After mm_users or mm_count reaches zero, switching away
+                *   from the mm could clear mm_cpumask to reduce some IPIs
+                *   (some batching or delaying would help).
+                * - A delayed freeing and RCU-like quiescing sequence based on
+                *   mm switching to avoid IPIs completely.
+                */
+               on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
+               if (IS_ENABLED(CONFIG_DEBUG_VM))
+                       on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
+       } else {
+               /*
+                * In this case, lazy tlb mms are refcounted and would not reach
+                * __mmdrop until all CPUs have switched away and mmdrop()ed.
+                */
+       }
+}
+
 /*
  * Called when the last reference to the mm
  * is dropped: either by a lazy thread or by
@@ -678,7 +726,12 @@ void __mmdrop(struct mm_struct *mm)
 {
        BUG_ON(mm == &init_mm);
        WARN_ON_ONCE(mm == current->mm);
+
+       /* Ensure no CPUs are using this as their lazy tlb mm */
+       shoot_lazy_tlbs(mm);
+
        WARN_ON_ONCE(mm == current->active_mm);
+
        mm_free_pgd(mm);
        destroy_context(mm);
        mmu_notifier_subscriptions_destroy(mm);
-- 
2.23.0
