Refactor the NE_FIT_TYPE split case when it comes to allocating
one extra object, which is needed in order to build the remaining
free space.
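
To illustrate the case (a simplified sketch, names follow
adjust_va_to_fit_type()): when the new area NVA lands strictly
inside one free vmap_area, two remainders are left over and only
one of them can reuse the existing object:

       /*
        *     |       |
        *   L V  NVA  V R
        * |---|-------|---|
        */
       lva->va_start = va->va_start;          /* extra object takes L */
       lva->va_end = nva_start_addr;
       va->va_start = nva_start_addr + size;  /* "va" shrinks to R    */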

Introduce ne_fit_preload()/ne_fit_preload_end() functions
for preloading one extra vmap_area object, to ensure that
we have it available when the fit type is NE_FIT_TYPE.

The preload is done per CPU in non-atomic context, thus with a
GFP_KERNEL allocation mask. More permissive parameters can be
beneficial for systems which suffer from high memory pressure or
low memory conditions.
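
The intended pairing on the caller side looks roughly as follows
(a sketch condensed from the alloc_vmap_area() change below):

       preloaded = ne_fit_preload(node);    /* GFP_KERNEL, may sleep */
       spin_lock(&vmap_area_lock);
       addr = __alloc_vmap_area(size, align, vstart, vend);
       ne_fit_preload_end(preloaded);

Under the lock the NE_FIT_TYPE path then takes the preloaded object
via __this_cpu_xchg() and only falls back to GFP_NOWAIT if the
preload did not succeed.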

Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
---
 mm/vmalloc.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 76 insertions(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ea1b65fac599..b553047aa05b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -364,6 +364,13 @@ static LIST_HEAD(free_vmap_area_list);
  */
 static struct rb_root free_vmap_area_root = RB_ROOT;
 
+/*
+ * Preload a CPU with one object for "no edge" split case. The
+ * aim is to get rid of allocations from the atomic context, thus
+ * to use more permissive allocation masks.
+ */
+static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
+
 static __always_inline unsigned long
 va_size(struct vmap_area *va)
 {
@@ -950,9 +957,24 @@ adjust_va_to_fit_type(struct vmap_area *va,
                 *   L V  NVA  V R
                 * |---|-------|---|
                 */
-               lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
-               if (unlikely(!lva))
-                       return -1;
+               lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
+               if (unlikely(!lva)) {
+                       /*
+                        * For percpu allocator we do not do any pre-allocation
+                        * and leave it as it is. The reason is it most likely
+                        * never ends up with NE_FIT_TYPE splitting. In case of
+                        * percpu allocations offsets and sizes are aligned to
+                        * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
+                        * are its main fitting cases.
+                        *
+                        * There are a few exceptions though, as an example it is
+                        * a first allocation (early boot up) when we have "one"
+                        * big free space that has to be split.
+                        */
+                       lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+                       if (!lva)
+                               return -1;
+               }
 
                /*
                 * Build the remainder.
@@ -1023,6 +1045,48 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 }
 
 /*
+ * Preload this CPU with one extra vmap_area object to ensure
+ * that we have it available when fit type of free area is
+ * NE_FIT_TYPE.
+ *
+ * The preload is done in non-atomic context, thus it allows us
+ * to use more permissive allocation masks to be more stable under
+ * low memory condition and high memory pressure.
+ *
+ * On success it returns 1 with preemption disabled. In case
+ * of error 0 is returned with preemption left enabled. Note it
+ * has to be paired with ne_fit_preload_end().
+ */
+static int
+ne_fit_preload(int nid)
+{
+       preempt_disable();
+
+       if (!__this_cpu_read(ne_fit_preload_node)) {
+               struct vmap_area *node;
+
+               preempt_enable();
+               node = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, nid);
+               if (node == NULL)
+                       return 0;
+
+               preempt_disable();
+
+               if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
+                       kmem_cache_free(vmap_area_cachep, node);
+       }
+
+       return 1;
+}
+
+static void
+ne_fit_preload_end(int preloaded)
+{
+       if (preloaded)
+               preempt_enable();
+}
+
+/*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
  */
@@ -1034,6 +1098,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        struct vmap_area *va;
        unsigned long addr;
        int purged = 0;
+       int preloaded;
 
        BUG_ON(!size);
        BUG_ON(offset_in_page(size));
@@ -1056,6 +1121,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
 
 retry:
+       /*
+        * Even if it fails we do not really care about that.
+        * Just proceed as it is. The "overflow" path will refill
+        * the cache we allocate from.
+        */
+       preloaded = ne_fit_preload(node);
        spin_lock(&vmap_area_lock);
 
        /*
@@ -1063,6 +1134,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
         * returned. Therefore trigger the overflow path.
         */
        addr = __alloc_vmap_area(size, align, vstart, vend);
+       ne_fit_preload_end(preloaded);
+
        if (unlikely(addr == vend))
                goto overflow;
 
-- 
2.11.0
