Hi Minchan,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linux/master]
[also build test WARNING on linus/master v5.12-rc2 next-20210309]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Minchan-Kim/mm-disable-LRU-pagevec-during-the-migration-temporarily/20210309-131826
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 144c79ef33536b4ecb4951e07dbc1f2b7fa99d32
config: arm64-randconfig-r023-20210308 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 820f508b08d7c94b2dd7847e9710d2bc36d3dd45)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # https://github.com/0day-ci/linux/commit/e746db1a2ab13441890fa2cad8604bbec190b401
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Minchan-Kim/mm-disable-LRU-pagevec-during-the-migration-temporarily/20210309-131826
        git checkout e746db1a2ab13441890fa2cad8604bbec190b401
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All warnings (new ones prefixed by >>):

   mm/swap.c:244:4: error: implicit declaration of function 'lru_cache_disabled' [-Werror,-Wimplicit-function-declaration]
                           lru_cache_disabled())
                           ^
   mm/swap.c:244:4: note: did you mean 'lru_cache_disable'?
   include/linux/swap.h:342:13: note: 'lru_cache_disable' declared here
   extern void lru_cache_disable(void);
               ^
>> mm/swap.c:743:6: warning: no previous prototype for function '__lru_add_drain_all' [-Wmissing-prototypes]
   void __lru_add_drain_all(bool force_all_cpus)
        ^
   mm/swap.c:743:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void __lru_add_drain_all(bool force_all_cpus)
   ^
   static 
   mm/swap.c:858:6: error: conflicting types for 'lru_cache_disabled'
   bool lru_cache_disabled(void)
        ^
   mm/swap.c:244:4: note: previous implicit declaration is here
                           lru_cache_disabled())
                           ^
   1 warning and 2 errors generated.
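For illustration only (not from the build log): the diagnostics above point at
two declaration fixes. If lru_cache_disabled() is meant to be visible outside
mm/swap.c, declaring it next to the existing lru_cache_disable() declaration in
include/linux/swap.h would resolve both the implicit-declaration error at line
244 and the conflicting-types error at line 858. If __lru_add_drain_all() is
only called from within mm/swap.c, marking it static satisfies
-Wmissing-prototypes, as the compiler note suggests. A minimal sketch under
those assumptions (the placement and the extern form are assumptions, not taken
from the patch):

        /* include/linux/swap.h: assuming lru_cache_disabled() is a public helper */
        extern void lru_cache_disable(void);
        extern bool lru_cache_disabled(void);

        /* mm/swap.c: alternatively, if __lru_add_drain_all() is file-local,
         * a static forward declaration (with a matching static definition)
         * silences -Wmissing-prototypes: */
        static void __lru_add_drain_all(bool force_all_cpus);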


vim +/__lru_add_drain_all +743 mm/swap.c

   742  
 > 743  void __lru_add_drain_all(bool force_all_cpus)
   744  {
   745          /*
   746           * lru_drain_gen - Global pages generation number
   747           *
   748           * (A) Definition: global lru_drain_gen = x implies that all generations
   749           *     0 < n <= x are already *scheduled* for draining.
   750           *
   751           * This is an optimization for the highly-contended use case where a
   752           * user space workload keeps constantly generating a flow of pages for
   753           * each CPU.
   754           */
   755          static unsigned int lru_drain_gen;
   756          static struct cpumask has_work;
   757          static DEFINE_MUTEX(lock);
   758          unsigned cpu, this_gen;
   759  
   760          /*
   761           * Make sure nobody triggers this path before mm_percpu_wq is fully
   762           * initialized.
   763           */
   764          if (WARN_ON(!mm_percpu_wq))
   765                  return;
   766  
   767          /*
   768           * Guarantee pagevec counter stores visible by this CPU are visible to
   769           * other CPUs before loading the current drain generation.
   770           */
   771          smp_mb();
   772  
   773          /*
   774           * (B) Locally cache global LRU draining generation number
   775           *
   776           * The read barrier ensures that the counter is loaded before the mutex
   777           * is taken. It pairs with smp_mb() inside the mutex critical section
   778           * at (D).
   779           */
   780          this_gen = smp_load_acquire(&lru_drain_gen);
   781  
   782          mutex_lock(&lock);
   783  
   784          /*
   785           * (C) Exit the draining operation if a newer generation, from another
   786           * lru_add_drain_all(), was already scheduled for draining. Check (A).
   787           */
   788          if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
   789                  goto done;
   790  
   791          /*
   792           * (D) Increment global generation number
   793           *
   794           * Pairs with smp_load_acquire() at (B), outside of the critical
   795           * section. Use a full memory barrier to guarantee that the new global
   796           * drain generation number is stored before loading pagevec counters.
   797           *
   798           * This pairing must be done here, before the for_each_online_cpu loop
   799           * below which drains the page vectors.
   800           *
   801           * Let x, y, and z represent some system CPU numbers, where x < y < z.
   802           * Assume CPU #z is in the middle of the for_each_online_cpu loop
   803           * below and has already reached CPU #y's per-cpu data. CPU #x comes
   804           * along, adds some pages to its per-cpu vectors, then calls
   805           * lru_add_drain_all().
   806           *
   807           * If the paired barrier is done at any later step, e.g. after the
   808           * loop, CPU #x will just exit at (C) and miss flushing out all of its
   809           * added pages.
   810           */
   811          WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
   812          smp_mb();
   813  
   814          cpumask_clear(&has_work);
   815          for_each_online_cpu(cpu) {
   816                  struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
   817  
   818                  if (force_all_cpus ||
   819                      pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
   820                      data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
   821                      pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
   822                      pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
   823                      pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
   824                      need_activate_page_drain(cpu)) {
   825                          INIT_WORK(work, lru_add_drain_per_cpu);
   826                          queue_work_on(cpu, mm_percpu_wq, work);
   827                          __cpumask_set_cpu(cpu, &has_work);
   828                  }
   829          }
   830  
   831          for_each_cpu(cpu, &has_work)
   832                  flush_work(&per_cpu(lru_add_drain_work, cpu));
   833  
   834  done:
   835          mutex_unlock(&lock);
   836  }
   837  

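A hedged reading of the excerpt above, not part of the report: the
force_all_cpus parameter suggests __lru_add_drain_all() is shared between the
existing lru_add_drain_all() path and the new lru_cache_disable() machinery
declared in include/linux/swap.h, which would also be why the function is
non-static and trips -Wmissing-prototypes without a prior prototype. A sketch
of that assumed structure; the lru_disable_count counter name and the exact
caller bodies are hypothetical:

        /* mm/swap.c (assumed structure, for illustration only) */
        static atomic_t lru_disable_count;      /* hypothetical name */

        bool lru_cache_disabled(void)
        {
                return atomic_read(&lru_disable_count) != 0;
        }

        void lru_cache_disable(void)
        {
                atomic_inc(&lru_disable_count);
                /* force a drain on every online CPU, not just busy ones */
                __lru_add_drain_all(true);
        }

        void lru_add_drain_all(void)
        {
                /* drain only CPUs that actually have pending pagevec pages */
                __lru_add_drain_all(false);
        }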
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz (application/gzip)
