In the current implementation, KMALLOC_RECLAIM is not initialized
until all the KMALLOC_NORMAL sizes have been initialized.

But create_kmalloc_caches() can be made simpler and slightly faster
by initializing all the kmalloc types of a given size together, one
size at a time.
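
Roughly, the initialization order changes as in the following sketch
(a simplified illustration of the loop structure, not the exact code;
the existing "already created" checks are omitted here):

  /* Before: one pass over all sizes per type; KMALLOC_DMA caches
   * are created only after slab_state = UP */
  for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++)
          for (i = 0; i < KMALLOC_CACHE_NUM; i++)
                  new_kmalloc_cache(i, type, flags);
  slab_state = UP;
  /* ... KMALLOC_DMA caches created here under CONFIG_ZONE_DMA ... */

  /* After: all types of one size per loop iteration; slab_state = UP
   * is set once everything, including KMALLOC_DMA, is set up */
  for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
          new_kmalloc_cache(i, KMALLOC_NORMAL, flags);
          new_kmalloc_cache(i, KMALLOC_RECLAIM, flags | SLAB_RECLAIM_ACCOUNT);
  #ifdef CONFIG_ZONE_DMA
          new_kmalloc_cache(i, KMALLOC_DMA, flags | SLAB_CACHE_DMA);
  #endif
  }
  slab_state = UP;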

$ ./scripts/bloat-o-meter vmlinux.patch_1-5 vmlinux.patch_1-6
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-11 (-11)
Function                                     old     new   delta
create_kmalloc_caches                        214     203     -11
Total: Before=14788968, After=14788957, chg -0.00%

Although the size benefit is small (an extra check is added to
new_kmalloc_cache() for robustness), create_kmalloc_caches() becomes
much simpler.

Besides, KMALLOC_DMA is currently initialized after "slab_state = UP",
which does not seem to be necessary.

Commit f97d5f634d3b ("slab: Common function to create the kmalloc
array") introduced create_kmalloc_caches().

I found that for SLAB, KMALLOC_DMA is initialized before
"slab_state = UP", but for SLUB, KMALLOC_DMA is initialized after
"slab_state = UP".

Based on this fact, I think it is okay to initialize KMALLOC_DMA
before "slab_state = UP".

Signed-off-by: Pengfei Li <lpf.vec...@gmail.com>
---
 mm/slab_common.c | 35 +++++++++++++----------------------
 1 file changed, 13 insertions(+), 22 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index eeef5ac8d04d..00f2cfc66dbd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1168,11 +1168,11 @@ void __init setup_kmalloc_cache_index_table(void)
                size_index[size_index_elem(i)] = 0;
 }
 
-static void __init
+static __always_inline void __init
 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 {
-       if (type == KMALLOC_RECLAIM)
-               flags |= SLAB_RECLAIM_ACCOUNT;
+       if (kmalloc_caches[type][idx])
+               return;
 
        kmalloc_caches[type][idx] = create_kmalloc_cache(
                                        kmalloc_info[idx].name[type],
@@ -1188,30 +1188,21 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
 void __init create_kmalloc_caches(slab_flags_t flags)
 {
        int i;
-       enum kmalloc_cache_type type;
 
-       for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
-               for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
-                       if (!kmalloc_caches[type][i])
-                               new_kmalloc_cache(i, type, flags);
-               }
-       }
+       for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
+               new_kmalloc_cache(i, KMALLOC_NORMAL, flags);
 
-       /* Kmalloc array is now usable */
-       slab_state = UP;
+               new_kmalloc_cache(i, KMALLOC_RECLAIM,
+                                       flags | SLAB_RECLAIM_ACCOUNT);
 
 #ifdef CONFIG_ZONE_DMA
-       for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
-               struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
-
-               if (s) {
-                       kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
-                               kmalloc_info[i].name[KMALLOC_DMA],
-                               kmalloc_info[i].size,
-                               SLAB_CACHE_DMA | flags, 0, 0);
-               }
-       }
+               new_kmalloc_cache(i, KMALLOC_DMA,
+                                       flags | SLAB_CACHE_DMA);
 #endif
+       }
+
+       /* Kmalloc array is now usable */
+       slab_state = UP;
 }
 #endif /* !CONFIG_SLOB */
 
-- 
2.21.0
