On Mon, 3 Nov 2014, Vladimir Davydov wrote: > +static __always_inline void slab_free(struct kmem_cache *cachep, void *objp); > + > static __always_inline void * > slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, > unsigned long caller) > @@ -3185,6 +3187,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t > flags, int nodeid, > kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); > if (unlikely(flags & __GFP_ZERO)) > memset(ptr, 0, cachep->object_size); > + if (unlikely(memcg_kmem_recharge_slab(ptr, flags))) { > + slab_free(cachep, ptr); > + ptr = NULL; > + } > } > > return ptr; > @@ -3250,6 +3256,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, > unsigned long caller) > kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); > if (unlikely(flags & __GFP_ZERO)) > memset(objp, 0, cachep->object_size); > + if (unlikely(memcg_kmem_recharge_slab(objp, flags))) { > + slab_free(cachep, objp); > + objp = NULL; > + } > } >
Please do not add code to the hotpaths if it's avoidable. Can you charge the full slab only when allocated please? -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/