We have seen significant overhead from cache bouncing caused by NUMA
counter updates in multi-threaded page allocation. See commit
1d90ca897cb0 ("mm: update NUMA counter threshold size") for more
details.

This patch uses a fixed threshold of (S16_MAX - 2) for the NUMA
counters, while the other node page stats keep using the
runtime-calculated per-cpu stat_threshold for their global counter
updates.
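
For illustration only (not part of this patch), below is a minimal
user-space sketch of the threshold-fold pattern the diff relies on:
increments accumulate in a small per-CPU delta, and only when the
delta crosses the threshold is it folded into the shared global
counter. The names cpu_delta, global_count and THRESHOLD are made up
for the example.

#include <stdint.h>
#include <stdio.h>

#define THRESHOLD 125	/* stand-in for VM_NUMA_STAT_THRESHOLD */

static int16_t cpu_delta;	/* one copy per CPU in the kernel */
static long global_count;	/* the shared node counter */

static void inc_state(void)
{
	int16_t v = ++cpu_delta;

	if (v > THRESHOLD) {
		int16_t overstep = THRESHOLD >> 1;

		/*
		 * Fold the local excess into the global counter and
		 * leave the per-CPU delta at -overstep, so the next
		 * fold is roughly 1.5 thresholds of increments away.
		 */
		global_count += v + overstep;
		cpu_delta = -overstep;
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		inc_state();
	/* global_count + cpu_delta always equals the true total */
	printf("global=%ld delta=%d sum=%ld\n", global_count,
	       (int)cpu_delta, global_count + cpu_delta);
	return 0;
}

A larger threshold (here, S16_MAX - 2) makes folds, and hence writes
to the shared cacheline, rarer, at the cost of a less precise global
value between folds.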

Signed-off-by: Kemi Wang <kemi.w...@intel.com>
---
 mm/vmstat.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9c681cc..64e08ae 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -30,6 +30,8 @@
 
 #include "internal.h"
 
+#define VM_NUMA_STAT_THRESHOLD (S16_MAX - 2)
+
 #ifdef CONFIG_NUMA
 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
 
@@ -394,7 +396,11 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
        s16 v, t;
 
        v = __this_cpu_inc_return(*p);
-       t = __this_cpu_read(pcp->stat_threshold);
+       if (item >= NR_VM_NUMA_STAT_ITEMS)
+               t = __this_cpu_read(pcp->stat_threshold);
+       else
+               t = VM_NUMA_STAT_THRESHOLD;
+
        if (unlikely(v > t)) {
                s16 overstep = t >> 1;
 
@@ -549,7 +555,10 @@ static inline void mod_node_state(struct pglist_data *pgdat,
                 * Most of the time the thresholds are the same anyways
                 * for all cpus in a node.
                 */
-               t = this_cpu_read(pcp->stat_threshold);
+               if (item >= NR_VM_NUMA_STAT_ITEMS)
+                       t = this_cpu_read(pcp->stat_threshold);
+               else
+                       t = VM_NUMA_STAT_THRESHOLD;
 
                o = this_cpu_read(*p);
                n = delta + o;
-- 
2.7.4
