In pageset_set_batch() and setup_pagelist_highmark(), ensure that batch
is always set to a safe value (1) prior to updating high, and ensure
that high is fully updated before setting the real value of batch.

Suggested by Gilad Ben-Yossef <gi...@benyossef.com> in this thread:

        https://lkml.org/lkml/2013/4/9/23

Also reproduces his proposed comment.

Signed-off-by: Cody P Schafer <c...@linux.vnet.ibm.com>
---
 mm/page_alloc.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d259599..a07bd4c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4007,11 +4007,26 @@ static int __meminit zone_batchsize(struct zone *zone)
 #endif
 }
 
+static void pageset_update_prep(struct per_cpu_pages *pcp)
+{
+       /*
+        * We're about to mess with PCP in a non-atomic fashion.  Put an
+        * intermediate safe value of batch and make sure it is visible before
+        * any other change
+        */
+       pcp->batch = 1;
+       smp_wmb();
+}
+
 /* a companion to setup_pagelist_highmark() */
 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
 {
        struct per_cpu_pages *pcp = &p->pcp;
+       pageset_update_prep(pcp);
+
        pcp->high = 6 * batch;
+       smp_wmb();
+
        pcp->batch = max(1UL, 1 * batch);
 }
 
@@ -4039,7 +4054,11 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
        struct per_cpu_pages *pcp;
 
        pcp = &p->pcp;
+       pageset_update_prep(pcp);
+
        pcp->high = high;
+       smp_wmb();
+
        pcp->batch = max(1UL, high/4);
        if ((high/4) > (PAGE_SHIFT * 8))
                pcp->batch = PAGE_SHIFT * 8;
-- 
1.8.2

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to