On Fri, Feb 05, 2021 at 06:34PM +0100, Andrey Konovalov wrote:
> Mark all static functions in common.c and kasan.h that are used for
> hardware tag-based KASAN as inline to avoid unnecessary function calls.
> 
> Signed-off-by: Andrey Konovalov <andreyk...@google.com>

Reviewed-by: Marco Elver <el...@google.com>
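
For context, the change only adds the inline hint to small static helpers used
on the hot allocation/free paths, so the compiler can fold them into their
callers instead of emitting a call. A minimal sketch of the resulting shape
(illustrative name, not taken from the patch):

    /* Before the change this helper was plain `static`, so the compiler
     * could emit an out-of-line call at each use; `inline` hints it to
     * fold the helper into callers on the hot paths.
     */
    static inline u8 example_tag_helper(const void *object)
    {
            return 0xff;
    }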

> ---
>  mm/kasan/common.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 7ffb1e6de2ef..7b53291dafa1 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -279,7 +279,8 @@ void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
>   *    based on objects indexes, so that objects that are next to each other
>   *    get different tags.
>   */
> -static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
> +static inline u8 assign_tag(struct kmem_cache *cache,
> +                                     const void *object, bool init)
>  {
>       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
>               return 0xff;
> @@ -321,8 +322,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
>       return (void *)object;
>  }
>  
> -static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
> -                           unsigned long ip, bool quarantine)
> +static inline bool ____kasan_slab_free(struct kmem_cache *cache,
> +                             void *object, unsigned long ip, bool quarantine)
>  {
>       u8 tag;
>       void *tagged_object;
> @@ -366,7 +367,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
>       return ____kasan_slab_free(cache, object, ip, true);
>  }
>  
> -static bool ____kasan_kfree_large(void *ptr, unsigned long ip)
> +static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
>  {
>       if (ptr != page_address(virt_to_head_page(ptr))) {
>               kasan_report_invalid_free(ptr, ip);
> @@ -461,8 +462,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
>       return tagged_object;
>  }
>  
> -static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
> -                                     size_t size, gfp_t flags)
> +static inline void *____kasan_kmalloc(struct kmem_cache *cache,
> +                             const void *object, size_t size, gfp_t flags)
>  {
>       unsigned long redzone_start;
>       unsigned long redzone_end;
> -- 
> 2.30.0.365.g02bc693789-goog
> 
