tree:   https://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-next.git slab-priority
head:   c5c56bb8db68a328b5e55cab87b5e6306177e9b2
commit: c5c56bb8db68a328b5e55cab87b5e6306177e9b2 [1/1] mm: use sc->priority for slab shrink targets
config: i386-randconfig-x001-201746 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
        git checkout c5c56bb8db68a328b5e55cab87b5e6306177e9b2
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   mm/vmscan.c: In function 'do_shrink_slab':
>> mm/vmscan.c:336:2: error: 'deleta' undeclared (first use in this function)
     deleta *= 4;
     ^~~~~~
   mm/vmscan.c:336:2: note: each undeclared identifier is reported only once for each function it appears in
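
The undeclared identifier at line 336 looks like a simple typo for the "delta"
variable declared at line 313 and assigned on the line directly above it;
assuming that intent, the fix would be a one-character rename:

        -	deleta *= 4;
        +	delta *= 4;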

vim +/deleta +336 mm/vmscan.c

   308  
   309  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
   310                                      struct shrinker *shrinker, int priority)
   311  {
   312          unsigned long freed = 0;
   313          unsigned long long delta;
   314          long total_scan;
   315          long freeable;
   316          long nr;
   317          long new_nr;
   318          int nid = shrinkctl->nid;
   319          long batch_size = shrinker->batch ? shrinker->batch
   320                                            : SHRINK_BATCH;
   321          long scanned = 0, next_deferred;
   322  
   323          freeable = shrinker->count_objects(shrinker, shrinkctl);
   324          if (freeable == 0)
   325                  return 0;
   326  
   327          /*
   328           * copy the current shrinker scan count into a local variable
   329           * and zero it so that other concurrent shrinker invocations
   330           * don't also do this scanning work.
   331           */
   332          nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
   333  
   334          total_scan = nr;
   335          delta = freeable >> priority;
 > 336          deleta *= 4;
   337          do_div(delta, shrinker->seeks);
   338          total_scan += delta;
   339          if (total_scan < 0) {
   340                  pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
   341                         shrinker->scan_objects, total_scan);
   342                  total_scan = freeable;
   343                  next_deferred = nr;
   344          } else
   345                  next_deferred = total_scan;
   346  
   347          /*
   348           * We need to avoid excessive windup on filesystem shrinkers
   349           * due to large numbers of GFP_NOFS allocations causing the
   350           * shrinkers to return -1 all the time. This results in a large
   351           * nr being built up so when a shrink that can do some work
   352           * comes along it empties the entire cache due to nr >>>
   353           * freeable. This is bad for sustaining a working set in
   354           * memory.
   355           *
   356           * Hence only allow the shrinker to scan the entire cache when
   357           * a large delta change is calculated directly.
   358           */
   359          if (delta < freeable / 4)
   360                  total_scan = min(total_scan, freeable / 2);
   361  
   362          /*
   363           * Avoid risking looping forever due to too large nr value:
   364           * never try to free more than twice the estimated number of
   365           * freeable entries.
   366           */
   367          if (total_scan > freeable * 2)
   368                  total_scan = freeable * 2;
   369  
   370          trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
   371                                     freeable, delta, total_scan, priority);
   372  
   373          /*
   374           * Normally, we should not scan less than batch_size objects in one
   375           * pass to avoid too frequent shrinker calls, but if the slab has less
   376           * than batch_size objects in total and we are really tight on memory,
   377           * we will try to reclaim all available objects, otherwise we can end
   378           * up failing allocations although there are plenty of reclaimable
   379           * objects spread over several slabs with usage less than the
   380           * batch_size.
   381           *
   382           * We detect the "tight on memory" situations by looking at the total
   383           * number of objects we want to scan (total_scan). If it is greater
   384           * than the total number of objects on slab (freeable), we must be
   385           * scanning at high prio and therefore should try to reclaim as much as
   386           * possible.
   387           */
   388          while (total_scan >= batch_size ||
   389                 total_scan >= freeable) {
   390                  unsigned long ret;
   391                  unsigned long nr_to_scan = min(batch_size, total_scan);
   392  
   393                  shrinkctl->nr_to_scan = nr_to_scan;
   394                  shrinkctl->nr_scanned = nr_to_scan;
   395                  ret = shrinker->scan_objects(shrinker, shrinkctl);
   396                  if (ret == SHRINK_STOP)
   397                          break;
   398                  freed += ret;
   399  
   400                  count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
   401                  total_scan -= shrinkctl->nr_scanned;
   402                  scanned += shrinkctl->nr_scanned;
   403  
   404                  cond_resched();
   405          }
   406  
   407          if (next_deferred >= scanned)
   408                  next_deferred -= scanned;
   409          else
   410                  next_deferred = 0;
   411          /*
   412           * move the unused scan count back into the shrinker in a
   413           * manner that handles concurrent updates. If we exhausted the
   414           * scan, there is no need to do an update.
   415           */
   416          if (next_deferred > 0)
   417                  new_nr = atomic_long_add_return(next_deferred,
   418                                                  &shrinker->nr_deferred[nid]);
   419          else
   420                  new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
   421  
   422          trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
   423          return freed;
   424  }
   425  
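
For context, the computation around line 336 sizes the scan target as
delta = (freeable >> priority) * 4 / seeks. Below is a standalone,
illustrative-only sketch (not kernel code) of how that target scales with
reclaim priority, assuming the intended statement is "delta *= 4;". The
freeable count is a made-up example input; DEF_PRIORITY in the kernel is 12
and DEFAULT_SEEKS is 2.

        #include <stdio.h>

        int main(void)
        {
                unsigned long long freeable = 10000;    /* made-up object count */
                unsigned long long seeks = 2;           /* DEFAULT_SEEKS */
                int priority;

                /* DEF_PRIORITY is 12; lower values mean more reclaim pressure */
                for (priority = 12; priority >= 0; priority--) {
                        unsigned long long delta = freeable >> priority;

                        delta *= 4;             /* the statement the typo broke */
                        delta /= seeks;         /* stands in for do_div() */
                        printf("priority %2d -> delta %llu\n", priority, delta);
                }
                return 0;
        }

At DEF_PRIORITY this gives delta = (10000 >> 12) * 4 / 2 = 4, while at
priority 0 it reaches 20000, which is exactly the freeable * 2 cap the
function applies further down.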

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
