Hi,

[auto build test WARNING on v4.7-rc2]
[cannot apply to next-20160608]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Alexander-Potapenko/mm-kasan-switch-SLUB-to-stackdepot-enable-memory-quarantine-for-SLUB/20160609-024216
config: x86_64-allmodconfig (attached as .config)
compiler: gcc-6 (Debian 6.1.1-1) 6.1.1 20160430
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 
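        # (untested suggestion, not part of the bot's recipe) to rebuild just
        # the file that triggers these warnings:
        make ARCH=x86_64 olddefconfig
        make ARCH=x86_64 mm/kasan/kasan.o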

All warnings (new ones prefixed by >>):

   mm/kasan/kasan.c: In function 'kasan_cache_create':
>> mm/kasan/kasan.c:374:22: warning: unused variable 'orig_size' [-Wunused-variable]
     int redzone_adjust, orig_size = *size;
                         ^~~~~~~~~
   mm/kasan/kasan.c: In function 'kasan_slab_free':
>> mm/kasan/kasan.c:561:4: warning: 'return' with no value, in function returning non-void [-Wreturn-type]
       return;
       ^~~~~~
   mm/kasan/kasan.c:547:6: note: declared here
    bool kasan_slab_free(struct kmem_cache *cache, void *object)
         ^~~~~~~~~~~~~~~
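
Both warnings come from the !CONFIG_SLAB (i.e. SLUB) side of this
allmodconfig build: 'orig_size' is only read back inside the CONFIG_SLAB
block of kasan_cache_create(), and the early return at line 561 carries no
value although kasan_slab_free() now returns bool. A minimal sketch of one
way to silence both (an illustration only, not necessarily the author's
intended fix):

    --- a/mm/kasan/kasan.c
    +++ b/mm/kasan/kasan.c
    @@ kasan_cache_create @@
    -	int redzone_adjust, orig_size = *size;
    +	int redzone_adjust;
    +#ifdef CONFIG_SLAB
    +	int orig_size = *size;	/* restored only in the SLAB fallback path */
    +#endif
    @@ kasan_slab_free @@
     		if (!alloc_info || !free_info)
    -			return;
    +			return false;	/* no metadata: fall back to an ordinary free */

Alternatively, declaring orig_size as __maybe_unused would avoid the extra
#ifdef.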

vim +/orig_size +374 mm/kasan/kasan.c

   368          return rz;
   369  }
   370  
   371  void kasan_cache_create(struct kmem_cache *cache, size_t *size,
   372                          unsigned long *flags)
   373  {
 > 374          int redzone_adjust, orig_size = *size;
   375  
   376  #ifdef CONFIG_SLAB
   377          /*
   378           * Make sure the adjusted size is still less than
   379           * KMALLOC_MAX_CACHE_SIZE, i.e. we don't use the page allocator.
   380           */
   381  
   382          if (*size > KMALLOC_MAX_CACHE_SIZE -
   383              sizeof(struct kasan_alloc_meta) -
   384              sizeof(struct kasan_free_meta))
   385                  return;
   386  #endif
   387          *flags |= SLAB_KASAN;
   388  
   389          /* Add alloc meta. */
   390          cache->kasan_info.alloc_meta_offset = *size;
   391          *size += sizeof(struct kasan_alloc_meta);
   392  
   393          /* Add free meta. */
   394          if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
   395              cache->object_size < sizeof(struct kasan_free_meta)) {
   396                  cache->kasan_info.free_meta_offset = *size;
   397                  *size += sizeof(struct kasan_free_meta);
   398          } else {
   399                  cache->kasan_info.free_meta_offset = 0;
   400          }
   401          redzone_adjust = optimal_redzone(cache->object_size) -
   402                  (*size - cache->object_size);
   403  
   404          if (redzone_adjust > 0)
   405                  *size += redzone_adjust;
   406  
   407  #ifdef CONFIG_SLAB
   408          *size = min(KMALLOC_MAX_CACHE_SIZE,
   409                      max(*size,
   410                          cache->object_size +
   411                          optimal_redzone(cache->object_size)));
   412          /*
   413           * If the metadata doesn't fit, disable KASAN at all.
   414           */
   415          if (*size <= cache->kasan_info.alloc_meta_offset ||
   416                          *size <= cache->kasan_info.free_meta_offset) {
   417                  *flags &= ~SLAB_KASAN;
   418                  *size = orig_size;
   419                  cache->kasan_info.alloc_meta_offset = -1;
   420                  cache->kasan_info.free_meta_offset = -1;
   421          }
   422  #else
   423          *size = max(*size,
   424                          cache->object_size +
   425                          optimal_redzone(cache->object_size));
   426  
   427  #endif
   428  }
   429  
   430  void kasan_cache_shrink(struct kmem_cache *cache)
   431  {
   432          quarantine_remove_cache(cache);
   433  }
   434  
   435  void kasan_cache_destroy(struct kmem_cache *cache)
   436  {
   437          quarantine_remove_cache(cache);
   438  }
   439  
   440  void kasan_poison_slab(struct page *page)
   441  {
   442          kasan_poison_shadow(page_address(page),
   443                          PAGE_SIZE << compound_order(page),
   444                          KASAN_KMALLOC_REDZONE);
   445  }
   446  
   447  void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
   448  {
   449          kasan_unpoison_shadow(object, cache->object_size);
   450  }
   451  
   452  void kasan_poison_object_data(struct kmem_cache *cache, void *object)
   453  {
   454          kasan_poison_shadow(object,
   455                          round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
   456                          KASAN_KMALLOC_REDZONE);
   457          if (cache->flags & SLAB_KASAN) {
   458                  struct kasan_alloc_meta *alloc_info =
   459                          get_alloc_info(cache, object);
   460                  if (alloc_info)
   461                          alloc_info->state = KASAN_STATE_INIT;
   462          }
   463  }
   464  
   465  static inline int in_irqentry_text(unsigned long ptr)
   466  {
   467          return (ptr >= (unsigned long)&__irqentry_text_start &&
   468                  ptr < (unsigned long)&__irqentry_text_end) ||
   469                  (ptr >= (unsigned long)&__softirqentry_text_start &&
   470                   ptr < (unsigned long)&__softirqentry_text_end);
   471  }
   472  
   473  static inline void filter_irq_stacks(struct stack_trace *trace)
   474  {
   475          int i;
   476  
   477          if (!trace->nr_entries)
   478                  return;
   479          for (i = 0; i < trace->nr_entries; i++)
   480                  if (in_irqentry_text(trace->entries[i])) {
   481                          /* Include the irqentry function into the stack. */
   482                          trace->nr_entries = i + 1;
   483                          break;
   484                  }
   485  }
   486  
   487  static inline depot_stack_handle_t save_stack(gfp_t flags)
   488  {
   489          unsigned long entries[KASAN_STACK_DEPTH];
   490          struct stack_trace trace = {
   491                  .nr_entries = 0,
   492                  .entries = entries,
   493                  .max_entries = KASAN_STACK_DEPTH,
   494                  .skip = 0
   495          };
   496  
   497          save_stack_trace(&trace);
   498          filter_irq_stacks(&trace);
   499          if (trace.nr_entries != 0 &&
   500              trace.entries[trace.nr_entries-1] == ULONG_MAX)
   501                  trace.nr_entries--;
   502  
   503          return depot_save_stack(&trace, flags);
   504  }
   505  
   506  static inline void set_track(struct kasan_track *track, gfp_t flags)
   507  {
   508          track->pid = current->pid;
   509          track->stack = save_stack(flags);
   510  }
   511  
   512  struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
   513                                          const void *object)
   514  {
   515          BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
   516          if (cache->kasan_info.alloc_meta_offset == -1)
   517                  return NULL;
   518          return (void *)object + cache->kasan_info.alloc_meta_offset;
   519  }
   520  
   521  struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
   522                                        const void *object)
   523  {
   524          BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
   525          if (cache->kasan_info.free_meta_offset == -1)
   526                  return NULL;
   527          return (void *)object + cache->kasan_info.free_meta_offset;
   528  }
   529  
   530  void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
   531  {
   532          kasan_kmalloc(cache, object, cache->object_size, flags);
   533  }
   534  
   535  void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
   536  {
   537          unsigned long size = cache->object_size;
   538          unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
   539  
   540          /* RCU slabs could be legally used after free within the RCU period */
   541          if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
   542                  return;
   543  
   544          kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
   545  }
   546  
   547  bool kasan_slab_free(struct kmem_cache *cache, void *object)
   548  {
   549          /* RCU slabs could be legally used after free within the RCU period */
   550          if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
   551                  return false;
   552  
   553          if (likely(cache->flags & SLAB_KASAN)) {
   554                  struct kasan_alloc_meta *alloc_info =
   555                          get_alloc_info(cache, object);
   556                  struct kasan_free_meta *free_info =
   557                          get_free_info(cache, object);
   558                  WARN_ON(!alloc_info);
   559                  WARN_ON(!free_info);
   560                  if (!alloc_info || !free_info)
 > 561                          return;
   562                  switch (alloc_info->state) {
   563                  case KASAN_STATE_ALLOC:
   564                          alloc_info->state = KASAN_STATE_QUARANTINE;

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
