On 5/20/20 11:00 PM, Roman Gushchin wrote:
> 
> From beeaecdac85c3a395dcfb99944dc8c858b541cbf Mon Sep 17 00:00:00 2001
> From: Roman Gushchin <g...@fb.com>
> Date: Mon, 29 Jul 2019 18:18:42 -0700
> Subject: [PATCH v3.2 04/19] mm: slub: implement SLUB version of obj_to_index()
> 
> This commit implements a SLUB version of the obj_to_index() function,
> which will be required to calculate the offset of obj_cgroup in the
> obj_cgroups vector to store/obtain the objcg ownership data.
> 
> To make it faster, let's repeat SLAB's trick, introduced by
> commit 6a2d7a955d8d ("[PATCH] SLAB: use a multiply instead of a
> divide in obj_to_index()"), and avoid an expensive division.
> 
> Vlastimil Babka noticed that SLUB already has a similar function
> called slab_index(), which is defined only if SLUB_DEBUG is enabled.
> That function does similar math, but with a division, and it also
> takes a page address instead of a page pointer.
> 
> Let's remove slab_index() and replace it with the new helper
> __obj_to_index(), which takes a page address. obj_to_index()
> will be a simple wrapper taking a page pointer and passing
> page_address(page) into __obj_to_index().
> 
> Signed-off-by: Roman Gushchin <g...@fb.com>

Looks good!
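
A quick aside for readers who haven't seen the reciprocal trick before:
reciprocal_value() precomputes a multiplier and two shift counts for a
fixed divisor, and reciprocal_divide() then turns the per-object division
into a multiply plus shifts. Below is a userspace sketch of that scheme
as I read lib/reciprocal_div.c / include/linux/reciprocal_div.h (the
in-kernel code is authoritative; the 104-byte object size is just a
made-up example):

  #include <stdint.h>
  #include <stdio.h>

  struct reciprocal_value {
          uint32_t m;
          uint8_t sh1, sh2;
  };

  static int fls32(uint32_t x)
  {
          return x ? 32 - __builtin_clz(x) : 0;
  }

  /* Precompute the multiplier and shifts for a fixed divisor d. */
  static struct reciprocal_value reciprocal_value(uint32_t d)
  {
          struct reciprocal_value R;
          int l = fls32(d - 1);
          uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

          R.m   = (uint32_t)m;
          R.sh1 = l > 1 ? 1 : l;          /* min(l, 1) */
          R.sh2 = l > 0 ? l - 1 : 0;      /* max(l - 1, 0) */
          return R;
  }

  /* a / d without a divide instruction. */
  static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
  {
          uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

          return (t + ((a - t) >> R.sh1)) >> R.sh2;
  }

  int main(void)
  {
          uint32_t size = 104;            /* stand-in for s->size */
          struct reciprocal_value rs = reciprocal_value(size);
          uint32_t off;

          /* object offset within the page -> object index */
          for (off = 0; off < 8 * size; off += size)
                  printf("offset %4u -> index %u (plain div: %u)\n",
                         off, reciprocal_divide(off, rs), off / size);
          return 0;
  }

If I'm reading reciprocal_value() right, power-of-two object sizes
degenerate to m == 1, so the whole thing collapses to shifts and the
common case stays cheap either way.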

Reviewed-by: Vlastimil Babka <vba...@suse.cz>
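
One more illustration, for readers following along: the debug/validation
paths converted below already have page_address() in hand, so they call
__obj_to_index() directly, while a caller starting from an object pointer
would use the wrapper. A hypothetical call-site sketch (my own, not code
from this patch; the actual obj_cgroups lookup only appears later in the
series):

  /* hypothetical caller, obj points somewhere into a slab page */
  struct page *page = virt_to_head_page(obj);
  unsigned int off  = obj_to_index(page->slab_cache, page, obj);

  /*
   * Equivalent to the old slab_index() math,
   *   (kasan_reset_tag(obj) - page_address(page)) / s->size,
   * but computed with reciprocal_divide() instead of a division.
   */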

> ---
>  include/linux/slub_def.h | 16 ++++++++++++++++
>  mm/slub.c                | 15 +++++----------
>  2 files changed, 21 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index d2153789bd9f..30e91c83d401 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -8,6 +8,7 @@
>   * (C) 2007 SGI, Christoph Lameter
>   */
>  #include <linux/kobject.h>
> +#include <linux/reciprocal_div.h>
>  
>  enum stat_item {
>       ALLOC_FASTPATH,         /* Allocation from cpu slab */
> @@ -86,6 +87,7 @@ struct kmem_cache {
>       unsigned long min_partial;
>       unsigned int size;      /* The size of an object including metadata */
>       unsigned int object_size;/* The size of an object without metadata */
> +     struct reciprocal_value reciprocal_size;
>       unsigned int offset;    /* Free pointer offset */
>  #ifdef CONFIG_SLUB_CPU_PARTIAL
>       /* Number of per cpu partial objects to keep around */
> @@ -182,4 +184,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
>       return result;
>  }
>  
> +/* Determine object index from a given position */
> +static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> +                                       void *addr, void *obj)
> +{
> +     return reciprocal_divide(kasan_reset_tag(obj) - addr,
> +                              cache->reciprocal_size);
> +}
> +
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +                                     const struct page *page, void *obj)
> +{
> +     return __obj_to_index(cache, page_address(page), obj);
> +}
> +
>  #endif /* _LINUX_SLUB_DEF_H */
> diff --git a/mm/slub.c b/mm/slub.c
> index 2df4d4a420d1..d605d18b3c1b 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -313,12 +313,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
>               __p < (__addr) + (__objects) * (__s)->size; \
>               __p += (__s)->size)
>  
> -/* Determine object index from a given position */
> -static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
> -{
> -     return (kasan_reset_tag(p) - addr) / s->size;
> -}
> -
>  static inline unsigned int order_objects(unsigned int order, unsigned int size)
>  {
>       return ((unsigned int)PAGE_SIZE << order) / size;
> @@ -461,7 +455,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
>       bitmap_zero(object_map, page->objects);
>  
>       for (p = page->freelist; p; p = get_freepointer(s, p))
> -             set_bit(slab_index(p, s, addr), object_map);
> +             set_bit(__obj_to_index(s, addr, p), object_map);
>  
>       return object_map;
>  }
> @@ -3682,6 +3676,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
>        */
>       size = ALIGN(size, s->align);
>       s->size = size;
> +     s->reciprocal_size = reciprocal_value(size);
>       if (forced_order >= 0)
>               order = forced_order;
>       else
> @@ -3788,7 +3783,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
>       map = get_map(s, page);
>       for_each_object(p, s, addr, page->objects) {
>  
> -             if (!test_bit(slab_index(p, s, addr), map)) {
> +             if (!test_bit(__obj_to_index(s, addr, p), map)) {
>                       pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
>                       print_tracking(s, p);
>               }
> @@ -4513,7 +4508,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page)
>       /* Now we know that a valid freelist exists */
>       map = get_map(s, page);
>       for_each_object(p, s, addr, page->objects) {
> -             u8 val = test_bit(slab_index(p, s, addr), map) ?
> +             u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
>                        SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
>  
>               if (!check_object(s, page, p, val))
> @@ -4704,7 +4699,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
>  
>       map = get_map(s, page);
>       for_each_object(p, s, addr, page->objects)
> -             if (!test_bit(slab_index(p, s, addr), map))
> +             if (!test_bit(__obj_to_index(s, addr, p), map))
>                       add_location(t, s, get_track(s, p, alloc));
>       put_map(map);
>  }
> 
