The calculate_alignment() function is only used inside 'slab_common.c',
so make it static and let the compiler do more optimizations.

After this patch there are small improvements in 'text' and 'data' size.

$ gcc --version
  gcc (GCC) 7.2.1 20171128

Before:
  text     data     bss     dec      hex        filename
  9890457  3828702  1212364 14931523 e3d643     vmlinux

After:
  text     data     bss     dec      hex        filename
  9890437  3828670  1212364 14931471 e3d60f     vmlinux

Also I fixed a 'style problem' reported by 'scripts/checkpatch.pl'.

  WARNING: Missing a blank line after declarations
  #53: FILE: mm/slab_common.c:286:
  +             unsigned long ralign = cache_line_size();
  +             while (size <= ralign / 2)

Signed-off-by: Byongho Lee <bhlee.ker...@gmail.com>
---
 mm/slab.h        |  3 ---
 mm/slab_common.c | 56 +++++++++++++++++++++++++++++---------------------------
 2 files changed, 29 insertions(+), 30 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 028cdc7df67e..e894889dc24a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -79,9 +79,6 @@ extern const struct kmalloc_info_struct {
        unsigned long size;
 } kmalloc_info[];
 
-unsigned long calculate_alignment(unsigned long flags,
-               unsigned long align, unsigned long size);
-
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d7fe71ff5e4..d25e7b56e20b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+               unsigned long align, unsigned long size)
+{
+       /*
+        * If the user wants hardware cache aligned objects then follow that
+        * suggestion if the object is sufficiently large.
+        *
+        * The hardware cache alignment cannot override the specified
+        * alignment though. If that is greater then use it.
+        */
+       if (flags & SLAB_HWCACHE_ALIGN) {
+               unsigned long ralign;
+
+               ralign = cache_line_size();
+               while (size <= ralign / 2)
+                       ralign /= 2;
+               align = max(align, ralign);
+       }
+
+       if (align < ARCH_SLAB_MINALIGN)
+               align = ARCH_SLAB_MINALIGN;
+
+       return ALIGN(align, sizeof(void *));
+}
+
 /*
  * Find a mergeable slab cache
  */
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
        return NULL;
 }
 
-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(unsigned long flags,
-               unsigned long align, unsigned long size)
-{
-       /*
-        * If the user wants hardware cache aligned objects then follow that
-        * suggestion if the object is sufficiently large.
-        *
-        * The hardware cache alignment cannot override the specified
-        * alignment though. If that is greater then use it.
-        */
-       if (flags & SLAB_HWCACHE_ALIGN) {
-               unsigned long ralign = cache_line_size();
-               while (size <= ralign / 2)
-                       ralign /= 2;
-               align = max(align, ralign);
-       }
-
-       if (align < ARCH_SLAB_MINALIGN)
-               align = ARCH_SLAB_MINALIGN;
-
-       return ALIGN(align, sizeof(void *));
-}
-
 static struct kmem_cache *create_cache(const char *name,
                size_t object_size, size_t size, size_t align,
                unsigned long flags, void (*ctor)(void *),
-- 
2.15.1

Reply via email to