Instead of keeping the preloading logic open-coded, move it into
a separate function. Therefore introduce the preload_this_cpu_lock()
routine, which preloads the current CPU with one extra vmap_area
object.

There is no functional change as a result of this patch.

Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
---
 mm/vmalloc.c | 60 +++++++++++++++++++++++-----------------------------
 1 file changed, 27 insertions(+), 33 deletions(-)
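
A note for reviewers, not part of the change itself: below is a
minimal userspace C sketch of the preload-then-lock pattern that
the new helper factors out. All names in it (struct obj,
preload_slot, preload_slot_and_lock) are hypothetical; the kernel
code uses a per-CPU slot and resolves the refill race with
__this_cpu_cmpxchg() instead of a plain re-check under the lock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int payload; };

static struct obj *preload_slot;	/* spare object, stable only under lock */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static void preload_slot_and_lock(pthread_mutex_t *lock)
{
	struct obj *o = NULL;

	/*
	 * Allocate outside the lock so a blocking allocator can be
	 * used; the unlocked check is only a hint and may race.
	 */
	if (!preload_slot)
		o = malloc(sizeof(*o));

	pthread_mutex_lock(lock);

	/*
	 * Under the lock, park the spare object, or drop it if the
	 * slot has been refilled in the meantime.
	 */
	if (o) {
		if (!preload_slot)
			preload_slot = o;
		else
			free(o);
	}
}

int main(void)
{
	preload_slot_and_lock(&slot_lock);
	/* ... critical section that may consume preload_slot ... */
	pthread_mutex_unlock(&slot_lock);

	printf("slot is %s\n", preload_slot ? "preloaded" : "empty");
	free(preload_slot);
	return 0;
}

The point of the pattern is that the possibly-blocking allocation
happens before the lock is taken, which is what lets the kernel
side use a more permissive gfp mask than GFP_NOWAIT.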

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8b564f91a610..093c7e034ca2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1430,6 +1430,29 @@ static void free_vmap_area(struct vmap_area *va)
        spin_unlock(&free_vmap_area_lock);
 }
 
+static inline void
+preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
+{
+       struct vmap_area *va = NULL;
+
+       /*
+        * Preload this CPU with one extra vmap_area object. It is used
+        * when fit type of free area is NE_FIT_TYPE. It guarantees that
+        * a CPU that does an allocation is preloaded.
+        *
+        * We do it in non-atomic context, thus it allows us to use more
+        * permissive allocation masks to be more stable under low memory
+        * condition and high memory pressure.
+        */
+       if (!this_cpu_read(ne_fit_preload_node))
+               va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
+
+       spin_lock(lock);
+
+       if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
+               kmem_cache_free(vmap_area_cachep, va);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1439,7 +1462,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask)
 {
-       struct vmap_area *va, *pva;
+       struct vmap_area *va;
        unsigned long addr;
        int purged = 0;
        int ret;
@@ -1465,43 +1488,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
 
 retry:
-       /*
-        * Preload this CPU with one extra vmap_area object. It is used
-        * when fit type of free area is NE_FIT_TYPE. Please note, it
-        * does not guarantee that an allocation occurs on a CPU that
-        * is preloaded, instead we minimize the case when it is not.
-        * It can happen because of cpu migration, because there is a
-        * race until the below spinlock is taken.
-        *
-        * The preload is done in non-atomic context, thus it allows us
-        * to use more permissive allocation masks to be more stable under
-        * low memory condition and high memory pressure. In rare case,
-        * if not preloaded, GFP_NOWAIT is used.
-        *
-        * Set "pva" to NULL here, because of "retry" path.
-        */
-       pva = NULL;
-
-       if (!this_cpu_read(ne_fit_preload_node))
-               /*
-                * Even if it fails we do not really care about that.
-                * Just proceed as it is. If needed "overflow" path
-                * will refill the cache we allocate from.
-                */
-               pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
-
-       spin_lock(&free_vmap_area_lock);
-
-       if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
-               kmem_cache_free(vmap_area_cachep, pva);
+       preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
+       addr = __alloc_vmap_area(size, align, vstart, vend);
+       spin_unlock(&free_vmap_area_lock);
 
        /*
         * If an allocation fails, the "vend" address is
         * returned. Therefore trigger the overflow path.
         */
-       addr = __alloc_vmap_area(size, align, vstart, vend);
-       spin_unlock(&free_vmap_area_lock);
-
        if (unlikely(addr == vend))
                goto overflow;
 
-- 
2.20.1
