When checking a performance change with the will-it-scale scalability
mmap test [1], we found very high lock contention on the spinlock of
the percpu counter 'vm_committed_as':

    94.14%     0.35%  [kernel.kallsyms]         [k] _raw_spin_lock_irqsave
    48.21% _raw_spin_lock_irqsave;percpu_counter_add_batch;__vm_enough_memory;mmap_region;do_mmap;
    45.91% _raw_spin_lock_irqsave;percpu_counter_add_batch;__do_munmap;

This heavy lock contention is not always necessary: 'vm_committed_as'
only needs to be very precise when the strict OVERCOMMIT_NEVER policy
is in effect, which requires a rather small batch number for the
percpu counter.
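
For reference, percpu_counter_add_batch() only takes the shared
spinlock once a CPU's local delta reaches the batch size, so a larger
batch directly reduces how often the contended lock is acquired. A
simplified sketch of the batching logic, condensed from
lib/percpu_counter.c (details of the real code may differ slightly):

    void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                                  s32 batch)
    {
            s64 count;

            preempt_disable();
            count = __this_cpu_read(*fbc->counters) + amount;
            if (abs(count) >= batch) {
                    unsigned long flags;

                    /* slow path: fold local delta into the shared count */
                    raw_spin_lock_irqsave(&fbc->lock, flags);
                    fbc->count += count;
                    __this_cpu_sub(*fbc->counters, count - amount);
                    raw_spin_unlock_irqrestore(&fbc->lock, flags);
            } else {
                    /* fast path: stay CPU-local, no lock taken */
                    this_cpu_add(*fbc->counters, amount);
            }
            preempt_enable();
    }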

So lift the batch number to 16X for the OVERCOMMIT_ALWAYS and
OVERCOMMIT_GUESS policies, and add a sysctl handler to recompute it
when the policy is changed at runtime.
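
As a worked example (machine size chosen purely for illustration): on
a 16-CPU box with 32GB of RAM, i.e. 8M 4K pages, mm_compute_batch()
yields:

    OVERCOMMIT_NEVER:        8M / 16 CPUs / 256 =  2048 pages (~0.4%)
    OVERCOMMIT_ALWAYS/GUESS: 8M / 16 CPUs / 16  = 32768 pages (~6.25%, 16X)

Both values are above the max(num_present_cpus() * 2, 32) floor, so
the memory-sized batch is what takes effect.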

Benchmarking with the same test case from [1] shows a 53% improvement
on an 8C/16T desktop and a 2097% (20X) improvement on a 4S/72C/144T
server. Some platforms show no change, because the test case's mmap
size is bigger than the computed batch number, though the patch should
still help mmap/munmap in general.
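
For instance (illustrative numbers only): with 1GB of RAM and 4 CPUs,
the lifted batch is 256K pages / 4 / 16 = 4096 pages, i.e. 16MB, so a
test that maps chunks larger than 16MB still overflows the per-CPU
counter and takes the spinlock on every call.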

[1] https://lkml.org/lkml/2020/3/5/57

Signed-off-by: Feng Tang <[email protected]>
---
 include/linux/mm.h   |  2 ++
 include/linux/mman.h |  4 ++++
 kernel/sysctl.c      |  2 +-
 mm/mm_init.c         | 18 ++++++++++++++----
 mm/util.c            | 13 +++++++++++++
 5 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5a32342..bc3722f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -205,6 +205,8 @@ extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
                                    size_t *, loff_t *);
 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
                                    size_t *, loff_t *);
+extern int overcommit_policy_handler(struct ctl_table *, int, void __user *,
+                                   size_t *, loff_t *);
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 4b08e9c..91c93c1 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -57,8 +57,12 @@ extern struct percpu_counter vm_committed_as;
 
 #ifdef CONFIG_SMP
 extern s32 vm_committed_as_batch;
+extern void mm_compute_batch(void);
 #else
 #define vm_committed_as_batch 0
+static inline void mm_compute_batch(void)
+{
+}
 #endif
 
 unsigned long vm_memory_committed(void);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8a176d8..6fa552d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1278,7 +1278,7 @@ static struct ctl_table vm_table[] = {
                .data           = &sysctl_overcommit_memory,
                .maxlen         = sizeof(sysctl_overcommit_memory),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
+               .proc_handler   = overcommit_policy_handler,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &two,
        },
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7da6991..1654358 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -13,6 +13,7 @@
 #include <linux/memory.h>
 #include <linux/notifier.h>
 #include <linux/sched.h>
+#include <linux/mman.h>
 #include "internal.h"
 
 #ifdef CONFIG_DEBUG_MEMORY_INIT
@@ -140,16 +141,25 @@ EXPORT_SYMBOL_GPL(mm_kobj);
 #ifdef CONFIG_SMP
 s32 vm_committed_as_batch = 32;
 
-static void __meminit mm_compute_batch(void)
+void mm_compute_batch(void)
 {
        u64 memsized_batch;
        s32 nr = num_present_cpus();
        s32 batch = max_t(s32, nr*2, 32);
-
-       /* batch size set to 0.4% of (total memory/#cpus), or max int32 */
-       memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);
+       unsigned long ram_pages = totalram_pages();
+
+       /*
+        * For the OVERCOMMIT_NEVER policy, set the batch size to 0.4%
+        * of (total memory / #cpus), and lift it to 6.25% for the other
+        * policies to ease the possible lock contention of percpu_counter
+        * vm_committed_as, while capping it at INT_MAX in both cases.
+        */
+       if (sysctl_overcommit_memory == OVERCOMMIT_NEVER)
+               memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
+       else
+               memsized_batch = min_t(u64, ram_pages/nr/16, INT_MAX);
 
        vm_committed_as_batch = max_t(s32, memsized_batch, batch);
 }
 
 static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
diff --git a/mm/util.c b/mm/util.c
index 3de78e9..99936d3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -729,6 +729,19 @@ int overcommit_ratio_handler(struct ctl_table *table, int write,
        return ret;
 }
 
+int overcommit_policy_handler(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp,
+                            loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret == 0 && write)
+               mm_compute_batch();
+
+       return ret;
+}
+
 int overcommit_kbytes_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
-- 
2.7.4
