Gitweb:     
http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=f1b263393626fe66bee34ccdbf0487cd377e0213
Commit:     f1b263393626fe66bee34ccdbf0487cd377e0213
Parent:     dfce8648d64c07eade40d456d59cb4bfcbba008c
Author:     Christoph Lameter <[EMAIL PROTECTED]>
AuthorDate: Tue Jul 17 04:03:26 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Tue Jul 17 10:23:01 2007 -0700

    SLUB: faster more efficient slab determination for __kmalloc
    
    kmalloc_index is a long series of comparisons.  The attempt to replace
    kmalloc_index with something more efficient like ilog2 failed due to
    compiler issues with constant folding on gcc 3.3 / powerpc.
    
    kmalloc_index()'es long list of comparisons works fine for constant folding
    since all the comparisons are optimized away.  However, SLUB also uses
    kmalloc_index to determine the slab to use for the __kmalloc_xxx functions.
    This leads to a large set of comparisons in get_slab().
    
    The patch here allows us to get rid of that list of comparisons in get_slab():
    
    1. If the requested size is larger than 192 then we can simply use
       fls to determine the slab index since all larger slabs are
       of the power of two type.
    
    2. If the requested size is smaller, then we cannot use fls since there
       are non power of two caches to be considered. However, the sizes are
       in a manageable range. So we divide the size by 8. Then we have only
       24 possibilities left and then we simply look up the kmalloc index
       in a table.
    
    Code size of slub.o decreases by more than 200 bytes through this patch.
    
    Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 mm/slub.c |   71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 64 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index f93adb9..71988f9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2313,20 +2313,59 @@ static noinline struct kmem_cache 
*dma_kmalloc_cache(int index, gfp_t flags)
 }
 #endif
 
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+       3,      /* 8 */
+       4,      /* 16 */
+       5,      /* 24 */
+       5,      /* 32 */
+       6,      /* 40 */
+       6,      /* 48 */
+       6,      /* 56 */
+       6,      /* 64 */
+       1,      /* 72 */
+       1,      /* 80 */
+       1,      /* 88 */
+       1,      /* 96 */
+       7,      /* 104 */
+       7,      /* 112 */
+       7,      /* 120 */
+       7,      /* 128 */
+       2,      /* 136 */
+       2,      /* 144 */
+       2,      /* 152 */
+       2,      /* 160 */
+       2,      /* 168 */
+       2,      /* 176 */
+       2,      /* 184 */
+       2       /* 192 */
+};
+
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
-       int index = kmalloc_index(size);
+       int index;
 
-       if (!index)
-               return ZERO_SIZE_PTR;
+       if (size <= 192) {
+               if (!size)
+                       return ZERO_SIZE_PTR;
 
-       /* Allocation too large? */
-       if (index < 0)
-               return NULL;
+               index = size_index[(size - 1) / 8];
+       } else {
+               if (size > KMALLOC_MAX_SIZE)
+                       return NULL;
+
+               index = fls(size - 1);
+       }
 
 #ifdef CONFIG_ZONE_DMA
-       if ((flags & SLUB_DMA))
+       if (unlikely((flags & SLUB_DMA)))
                return dma_kmalloc_cache(index, flags);
+
 #endif
        return &kmalloc_caches[index];
 }
@@ -2532,6 +2571,24 @@ void __init kmem_cache_init(void)
                caches++;
        }
 
+
+       /*
+        * Patch up the size_index table if we have strange large alignment
+        * requirements for the kmalloc array. This is only the case for
+        * mips it seems. The standard arches will not generate any code here.
+        *
+        * Largest permitted alignment is 256 bytes due to the way we
+        * handle the index determination for the smaller caches.
+        *
+        * Make sure that nothing crazy happens if someone starts tinkering
+        * around with ARCH_KMALLOC_MINALIGN
+        */
+       BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+               (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+       for (i = 8; i < KMALLOC_MIN_SIZE;i++)
+               size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
        slab_state = UP;
 
        /* Provide the correct kmalloc names now that the caches are up */
-
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to