From: Joonsoo Kim <iamjoonsoo....@lge.com>

CPU partial support can introduce a level of indeterminism that is not
wanted in certain contexts (like a realtime kernel). Make it
configurable.

This patch is based on Christoph Lameter's "slub: Make cpu partial slab
support configurable V2".

Acked-by: Christoph Lameter <c...@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
Signed-off-by: Pekka Enberg <penb...@kernel.org>

https://jira.sw.ru/browse/PSBM-83199
[ Setting SLUB_CPU_PARTIAL=n saves us about 1G in the dvd-store test ]
(cherry picked from commit 345c905d13a4ec9f774b6b4bc038fe4aef26cced)
Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
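For readers less familiar with the SLUB internals: the core of the change is a
gating helper that collapses to a compile-time constant when the option is off,
so the per-cpu partial paths become dead code. Below is a minimal userspace
sketch of that idiom; the struct, the names and the nonzero limit are
illustrative stand-ins, not kernel code.

/*
 * Toy model (plain userspace C) of the CONFIG_SLUB_CPU_PARTIAL gating idiom.
 * All names here are illustrative stand-ins for the kernel ones.
 * Flip USE_CPU_PARTIAL to 0 to model a SLUB_CPU_PARTIAL=n build.
 */
#include <stdbool.h>
#include <stdio.h>

#define USE_CPU_PARTIAL 1	/* stands in for CONFIG_SLUB_CPU_PARTIAL */

struct cache_model {		/* hypothetical stand-in for struct kmem_cache */
	bool debug;		/* stands in for kmem_cache_debug(s) */
	unsigned int cpu_partial;
};

static inline bool cache_has_cpu_partial(const struct cache_model *s)
{
#if USE_CPU_PARTIAL
	return !s->debug;	/* real helper: !kmem_cache_debug(s) */
#else
	return false;		/* constant false: compiler drops the partial paths */
#endif
}

int main(void)
{
	struct cache_model c = { .debug = false };

	/* mirrors the kmem_cache_open() hunk: no per-cpu partials -> limit 0;
	 * 2 is just an illustrative nonzero limit (the kernel picks a size-based value) */
	c.cpu_partial = cache_has_cpu_partial(&c) ? 2 : 0;
	printf("cpu_partial = %u\n", c.cpu_partial);
	return 0;
}
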
 init/Kconfig | 11 +++++++++++
 mm/slub.c    | 28 +++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/init/Kconfig b/init/Kconfig
index 0cbeec104e9a..f48c74bcedac 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1597,6 +1597,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+       default y
+       depends on SLUB
+       bool "SLUB per cpu partial cache"
+       help
+         Per cpu partial caches accelerate objects allocation and freeing
+         that is local to a processor at the price of more indeterminism
+         in the latency of the free. On overflow these caches will be cleared
+         which requires the taking of locks that may cause latency spikes.
+         Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
        bool "Allow mmapped anonymous memory to be uninitialized"
        depends on EXPERT && !MMU
diff --git a/mm/slub.c b/mm/slub.c
index d5b0bc1fcd56..de39b176c995 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,6 +124,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+       return !kmem_cache_debug(s);
+#else
+       return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1685,7 +1694,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                        put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
-               if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+               if (!kmem_cache_has_cpu_partial(s)
+                       || available > s->cpu_partial / 2)
                        break;
 
        }
@@ -1999,6 +2009,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freel
 static void unfreeze_partials(struct kmem_cache *s,
                struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct kmem_cache_node *n = NULL, *n2 = NULL;
        struct page *page, *discard_page = NULL;
 
@@ -2053,6 +2064,7 @@ static void unfreeze_partials(struct kmem_cache *s,
                discard_slab(s, page);
                stat(s, FREE_SLAB);
        }
+#endif
 }
 
 /*
@@ -2066,6 +2078,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *oldpage;
        int pages;
        int pobjects;
@@ -2112,6 +2125,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                local_irq_restore(flags);
        }
        preempt_enable();
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2638,8 +2652,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                new.inuse -= cnt;
                if ((!new.inuse || !prior) && !was_frozen) {
 
-                       if (!kmem_cache_debug(s) && !prior) {
-
+                       if (kmem_cache_has_cpu_partial(s) && !prior) {
                                /*
                                 * Slab was on no list before and will be partially empty
                                 * We can defer the list move and instead freeze it.
@@ -2693,8 +2706,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
         * Objects left in the slab. If it was not on the partial list before
         * then add it.
         */
-       if (kmem_cache_debug(s) && unlikely(!prior)) {
-               remove_full(s, n, page);
+       if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+               if (kmem_cache_debug(s))
+                       remove_full(s, n, page);
                add_partial(n, page, DEACTIVATE_TO_TAIL);
                stat(s, FREE_ADD_PARTIAL);
        }
@@ -3396,7 +3410,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
         *    per node list when we run out of per cpu objects. We only fetch 50%
         *    to keep some capacity around for frees.
         */
-       if (kmem_cache_debug(s))
+       if (!kmem_cache_has_cpu_partial(s))
                s->cpu_partial = 0;
        else if (s->size >= PAGE_SIZE)
                s->cpu_partial = 2;
@@ -4836,7 +4850,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
        err = strict_strtoul(buf, 10, &objects);
        if (err)
                return err;
-       if (objects && kmem_cache_debug(s))
+       if (objects && !kmem_cache_has_cpu_partial(s))
                return -EINVAL;
 
        s->cpu_partial = objects;
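
As a quick sanity check after boot (a sketch, not part of the patch): each
cache's limit is exposed as the cpu_partial attribute under /sys/kernel/slab/,
handled by cpu_partial_store() above, and with SLUB_CPU_PARTIAL=n any attempt
to store a nonzero value now fails with -EINVAL while reads return 0. The
cache name in the reader below is only an example.

/* Read one cache's cpu_partial limit via sysfs; "kmalloc-64" is just an example name. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/cpu_partial";
	unsigned long limit;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &limit) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	/* 0 means no per-cpu partial slabs (debug caches, or SLUB_CPU_PARTIAL=n) */
	printf("%s: %lu\n", path, limit);
	return 0;
}
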
-- 
2.19.2
