ChangeSet 1.2231.1.8, 2005/03/28 19:18:25-08:00, [EMAIL PROTECTED]

        [PATCH] slab: kfree(null) is unlikely
        
        - mark kfree(NULL) as being unlikely
        
        - Coding-style consistency for function definitions
        
        Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
        Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
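
For context, unlikely() is the kernel's branch-prediction hint from
include/linux/compiler.h, a thin wrapper around GCC's __builtin_expect()
that tells the compiler which way a branch almost always goes, so the
common case stays on the straight-line code path. A minimal sketch of the
idiom outside the kernel tree, assuming GCC or Clang (my_free() and the
libc free() backing it are hypothetical stand-ins for kfree() and the
slab allocator):

	#include <stdlib.h>

	/* Kernel-style hint: the condition is expected to be false. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	void my_free(const void *objp)
	{
		/* NULL is legal but rare; keep the check off the hot path. */
		if (unlikely(!objp))
			return;
		free((void *)objp);
	}

The second bullet brings the definitions in line with the usual kernel
style of writing no space between a function name and its opening
parenthesis, which is why hunks like "kmem_cache_alloc (" below become
"kmem_cache_alloc(".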



 slab.c |   62 +++++++++++++++++++++++++++-----------------------------------
 1 files changed, 27 insertions(+), 35 deletions(-)


diff -Nru a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c 2005-03-28 21:07:59 -08:00
+++ b/mm/slab.c 2005-03-28 21:07:59 -08:00
@@ -510,7 +510,6 @@
        CACHE(ULONG_MAX)
 #undef CACHE
 };
-
 EXPORT_SYMBOL(malloc_sizes);
 
 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
@@ -574,7 +573,7 @@
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 
-static inline void ** ac_entry(struct array_cache *ac)
+static inline void **ac_entry(struct array_cache *ac)
 {
        return (void**)(ac+1);
 }
@@ -584,7 +583,7 @@
        return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep (size_t size, int gfpflags)
+static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 {
        struct cache_sizes *csizep = malloc_sizes;
 
@@ -609,7 +608,7 @@
 }
 
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate (unsigned long gfporder, size_t size, size_t align,
+static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
                 int flags, size_t *left_over, unsigned int *num)
 {
        int i;
@@ -667,7 +666,8 @@
        }
 }
 
-static struct array_cache *alloc_arraycache(int cpu, int entries, int batchcount)
+static struct array_cache *alloc_arraycache(int cpu, int entries,
+                                               int batchcount)
 {
        int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
        struct array_cache *nc = NULL;
@@ -690,8 +690,7 @@
 }
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
-                                 unsigned long action,
-                                 void *hcpu)
+                                 unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
        kmem_cache_t* cachep;
@@ -958,7 +957,8 @@
 #if DEBUG
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, unsigned long caller)
+static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+                               unsigned long caller)
 {
        int size = obj_reallen(cachep);
 
@@ -1616,7 +1616,6 @@
 
        return __cache_shrink(cachep);
 }
-
 EXPORT_SYMBOL(kmem_cache_shrink);
 
 /**
@@ -1636,7 +1635,7 @@
  * The caller must guarantee that noone will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
-int kmem_cache_destroy (kmem_cache_t * cachep)
+int kmem_cache_destroy(kmem_cache_t * cachep)
 {
        int i;
 
@@ -1681,11 +1680,10 @@
 
        return 0;
 }
-
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
+static struct slab* alloc_slabmgmt(kmem_cache_t *cachep,
                        void *objp, int colour_off, int local_flags)
 {
        struct slab *slabp;
@@ -1711,8 +1709,8 @@
        return (kmem_bufctl_t *)(slabp+1);
 }
 
-static void cache_init_objs (kmem_cache_t * cachep,
-                       struct slab * slabp, unsigned long ctor_flags)
+static void cache_init_objs(kmem_cache_t *cachep,
+                       struct slab *slabp, unsigned long ctor_flags)
 {
        int i;
 
@@ -1787,7 +1785,7 @@
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, int flags, int nodeid)
 {
        struct slab     *slabp;
        void            *objp;
@@ -1892,7 +1890,8 @@
        }
 }
 
-static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+                                       void *caller)
 {
        struct page *page;
        unsigned int objnr;
@@ -1990,7 +1989,7 @@
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, int flags)
 {
        int batchcount;
        struct kmem_list3 *l3;
@@ -2142,7 +2141,7 @@
 #endif
 
 
-static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, int flags)
 {
        unsigned long save_flags;
        void* objp;
@@ -2221,7 +2220,7 @@
        }
 }
 
-static void cache_flusharray (kmem_cache_t* cachep, struct array_cache *ac)
+static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 {
        int batchcount;
 
@@ -2278,7 +2277,7 @@
  *
  * Called with disabled ints.
  */
-static inline void __cache_free (kmem_cache_t *cachep, void* objp)
+static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 {
        struct array_cache *ac = ac_data(cachep);
 
@@ -2304,11 +2303,10 @@
  * Allocate an object from this cache.  The flags are only relevant
  * if the cache has no available objects.
  */
-void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, int flags)
 {
        return __cache_alloc(cachep, flags);
 }
-
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
@@ -2461,7 +2459,7 @@
  * platforms.  For example, on i386, it means that the memory must come
  * from the first 16MB.
  */
-void * __kmalloc (size_t size, int flags)
+void *__kmalloc(size_t size, int flags)
 {
        kmem_cache_t *cachep;
 
@@ -2470,7 +2468,6 @@
                return NULL;
        return __cache_alloc(cachep, flags);
 }
-
 EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_SMP
@@ -2514,7 +2511,6 @@
        kfree(pdata);
        return NULL;
 }
-
 EXPORT_SYMBOL(__alloc_percpu);
 #endif
 
@@ -2526,7 +2522,7 @@
  * Free an object which was previously allocated from this
  * cache.
  */
-void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(kmem_cache_t *cachep, void *objp)
 {
        unsigned long flags;
 
@@ -2534,7 +2530,6 @@
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(kmem_cache_free);
 
 /**
@@ -2555,7 +2550,6 @@
                memset(ret, 0, n * size);
        return ret;
 }
-
 EXPORT_SYMBOL(kcalloc);
 
 /**
@@ -2565,12 +2559,12 @@
  * Don't free memory not originally allocated by kmalloc()
  * or you will run into trouble.
  */
-void kfree (const void *objp)
+void kfree(const void *objp)
 {
        kmem_cache_t *c;
        unsigned long flags;
 
-       if (!objp)
+       if (unlikely(!objp))
                return;
        local_irq_save(flags);
        kfree_debugcheck(objp);
@@ -2578,7 +2572,6 @@
        __cache_free(c, (void*)objp);
        local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(kfree);
 
 #ifdef CONFIG_SMP
@@ -2602,7 +2595,6 @@
        }
        kfree(p);
 }
-
 EXPORT_SYMBOL(free_percpu);
 #endif
 
@@ -2610,7 +2602,6 @@
 {
        return obj_reallen(cachep);
 }
-
 EXPORT_SYMBOL(kmem_cache_size);
 
 struct ccupdate_struct {
@@ -2631,7 +2622,8 @@
 }
 
 
-static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, int shared)
+static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+                               int shared)
 {
        struct ccupdate_struct new;
        struct array_cache *new_shared;
@@ -2686,7 +2678,7 @@
 }
 
 
-static void enable_cpucache (kmem_cache_t *cachep)
+static void enable_cpucache(kmem_cache_t *cachep)
 {
        int err;
        int limit, shared;
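
The reason kfree(NULL) is legal at all is that it lets cleanup and error
paths free every pointer unconditionally instead of guarding each call;
the unlikely() annotation just records that most callers pass a real
object. A hedged sketch of that pattern in kernel context (struct foo
and foo_create() are hypothetical, purely for illustration):

	#include <linux/slab.h>

	struct foo {
		char *name;
		int *stats;
	};

	struct foo *foo_create(void)
	{
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		f->name = kmalloc(16, GFP_KERNEL);
		f->stats = kmalloc(64 * sizeof(int), GFP_KERNEL);
		if (!f->name || !f->stats) {
			/* Either pointer may be NULL here; kfree(NULL) is a no-op. */
			kfree(f->name);
			kfree(f->stats);
			kfree(f);
			return NULL;
		}
		return f;
	}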