Account slab allocations using codetag reference embedded into slabobj_ext.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
Co-developed-by: Kent Overstreet <kent.overstr...@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstr...@linux.dev>
---
 include/linux/slab_def.h |  2 +-
 include/linux/slub_def.h |  4 ++--
 mm/slab.c                |  4 +++-
 mm/slab.h                | 32 ++++++++++++++++++++++++++++++++
 4 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index a61e7d55d0d3..23f14dcb8d5b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -107,7 +107,7 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-                                       const struct slab *slab, void *obj)
+                                       const struct slab *slab, const void *obj)
 {
        u32 offset = (obj - slab->s_mem);
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index deb90cf4bffb..43fda4a5f23a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -182,14 +182,14 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-                                         void *addr, void *obj)
+                                         void *addr, const void *obj)
 {
        return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                 cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-                                       const struct slab *slab, void *obj)
+                                       const struct slab *slab, const void *obj)
 {
        if (is_kfence_address(obj))
                return 0;
diff --git a/mm/slab.c b/mm/slab.c
index cefcb7499b6c..18923f5f05b5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3348,9 +3348,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
                                         unsigned long caller)
 {
+       struct slab *slab = virt_to_slab(objp);
        bool init;
 
-       memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+       memcg_slab_free_hook(cachep, slab, &objp, 1);
+       alloc_tagging_slab_free_hook(cachep, slab, &objp, 1);
 
        if (is_kfence_address(objp)) {
                kmemleak_free_recursive(objp, cachep->flags);
diff --git a/mm/slab.h b/mm/slab.h
index 293210ed10a9..4859ce1f8808 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -533,6 +533,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+                                       void **p, int objects)
+{
+       struct slabobj_ext *obj_exts;
+       int i;
+
+       obj_exts = slab_obj_exts(slab);
+       if (!obj_exts)
+               return;
+
+       for (i = 0; i < objects; i++) {
+               unsigned int off = obj_to_index(s, slab, p[i]);
+
+               alloc_tag_sub(&obj_exts[off].ref, s->size);
+       }
+}
+
+#else
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+                                       void **p, int objects) {}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 #ifdef CONFIG_MEMCG_KMEM
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);
@@ -827,6 +853,12 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
                obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+               /* obj_exts can be allocated for other reasons */
+               if (likely(obj_exts) && mem_alloc_profiling_enabled())
+                       alloc_tag_add(&obj_exts->ref, current->alloc_tag, 
s->size);
+#endif
        }
 
        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
-- 
2.42.0.758.gaed0368e0e-goog


Reply via email to