Hi Minchan,

I love your patch! Yet there is something to improve:

[auto build test ERROR on block/for-next]
[also build test ERROR on linux/master linus/master v5.12-rc1 next-20210303]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Minchan-Kim/mm-disable-LRU-pagevec-during-the-migration-temporarily/20210303-191809
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: powerpc64-randconfig-r006-20210303 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project a7cad6680b4087eff8994f1f99ac40c661a6621f)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install powerpc64 cross compiling tool for clang build
        # apt-get install binutils-powerpc64-linux-gnu
        # https://github.com/0day-ci/linux/commit/a1c74fba81d1258e320ef52bc995cb0333e3e083
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Minchan-Kim/mm-disable-LRU-pagevec-during-the-migration-temporarily/20210303-191809
        git checkout a1c74fba81d1258e320ef52bc995cb0333e3e083
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=powerpc64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All errors (new ones prefixed by >>):

>> mm/swap.c:671:2: error: implicit declaration of function 'invalidate_bh_lru' [-Werror,-Wimplicit-function-declaration]
           invalidate_bh_lru(NULL);
           ^
   mm/swap.c:671:2: note: did you mean 'invalidate_bdev'?
   include/linux/blkdev.h:2021:20: note: 'invalidate_bdev' declared here
   static inline void invalidate_bdev(struct block_device *bdev)
                      ^
>> mm/swap.c:860:7: error: implicit declaration of function 'has_bh_in_lru' [-Werror,-Wimplicit-function-declaration]
                       has_bh_in_lru(cpu, NULL)) {
                       ^
   2 errors generated.
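
Both functions are called from mm/swap.c without a declaration visible in this configuration, so one plausible explanation is that their prototypes (and no-op stubs for builds without the buffer-head LRU) are missing from a shared header. The following is only a minimal sketch: the header location (e.g. include/linux/buffer_head.h), the exact prototypes, and the CONFIG_BLOCK guard are inferred from the call sites above, not taken from the patch.

        /*
         * Hypothetical declarations/stubs, e.g. in include/linux/buffer_head.h.
         * Prototypes are guessed from invalidate_bh_lru(NULL) and
         * has_bh_in_lru(cpu, NULL) in mm/swap.c.
         */
        #ifdef CONFIG_BLOCK
        void invalidate_bh_lru(void *arg);
        bool has_bh_in_lru(int cpu, void *dummy);
        #else
        static inline void invalidate_bh_lru(void *arg) {}
        static inline bool has_bh_in_lru(int cpu, void *dummy)
        {
                return false;
        }
        #endif

If the existing definitions in fs/buffer.c are still static, they would also need to be made non-static (or wrapped in exported helpers) for the CONFIG_BLOCK=y case.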


vim +/invalidate_bh_lru +671 mm/swap.c

   634  
   635  /*
   636   * Drain pages out of the cpu's pagevecs.
   637   * Either "cpu" is the current CPU, and preemption has already been
   638   * disabled; or "cpu" is being hot-unplugged, and is already dead.
   639   */
   640  void lru_add_drain_cpu(int cpu)
   641  {
   642          struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
   643  
   644          if (pagevec_count(pvec))
   645                  __pagevec_lru_add(pvec);
   646  
   647          pvec = &per_cpu(lru_rotate.pvec, cpu);
   648          /* Disabling interrupts below acts as a compiler barrier. */
   649          if (data_race(pagevec_count(pvec))) {
   650                  unsigned long flags;
   651  
   652                  /* No harm done if a racing interrupt already did this */
   653                  local_lock_irqsave(&lru_rotate.lock, flags);
   654                  pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
   655                  local_unlock_irqrestore(&lru_rotate.lock, flags);
   656          }
   657  
   658          pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
   659          if (pagevec_count(pvec))
   660                  pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
   661  
   662          pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
   663          if (pagevec_count(pvec))
   664                  pagevec_lru_move_fn(pvec, lru_deactivate_fn);
   665  
   666          pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
   667          if (pagevec_count(pvec))
   668                  pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
   669  
   670          activate_page_drain(cpu);
 > 671          invalidate_bh_lru(NULL);
   672  }
   673  
   674  /**
   675   * deactivate_file_page - forcefully deactivate a file page
   676   * @page: page to deactivate
   677   *
   678   * This function hints the VM that @page is a good reclaim candidate,
   679   * for example if its invalidation fails due to the page being dirty
   680   * or under writeback.
   681   */
   682  void deactivate_file_page(struct page *page)
   683  {
   684          /*
   685           * In a workload with many unevictable page such as mprotect,
   686           * unevictable page deactivation for accelerating reclaim is pointless.
   687           */
   688          if (PageUnevictable(page))
   689                  return;
   690  
   691          if (likely(get_page_unless_zero(page))) {
   692                  struct pagevec *pvec;
   693  
   694                  local_lock(&lru_pvecs.lock);
   695                  pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
   696  
   697                  if (pagevec_add_and_need_flush(pvec, page))
   698                          pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
   699                  local_unlock(&lru_pvecs.lock);
   700          }
   701  }
   702  
   703  /*
   704   * deactivate_page - deactivate a page
   705   * @page: page to deactivate
   706   *
   707   * deactivate_page() moves @page to the inactive list if @page was on the active
   708   * list and was not an unevictable page.  This is done to accelerate the reclaim
   709   * of @page.
   710   */
   711  void deactivate_page(struct page *page)
   712  {
   713          if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
   714                  struct pagevec *pvec;
   715  
   716                  local_lock(&lru_pvecs.lock);
   717                  pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
   718                  get_page(page);
   719                  if (pagevec_add_and_need_flush(pvec, page))
   720                          pagevec_lru_move_fn(pvec, lru_deactivate_fn);
   721                  local_unlock(&lru_pvecs.lock);
   722          }
   723  }
   724  
   725  /**
   726   * mark_page_lazyfree - make an anon page lazyfree
   727   * @page: page to deactivate
   728   *
   729   * mark_page_lazyfree() moves @page to the inactive file list.
   730   * This is done to accelerate the reclaim of @page.
   731   */
   732  void mark_page_lazyfree(struct page *page)
   733  {
   734          if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
   735              !PageSwapCache(page) && !PageUnevictable(page)) {
   736                  struct pagevec *pvec;
   737  
   738                  local_lock(&lru_pvecs.lock);
   739                  pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
   740                  get_page(page);
   741                  if (pagevec_add_and_need_flush(pvec, page))
   742                          pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
   743                  local_unlock(&lru_pvecs.lock);
   744          }
   745  }
   746  
   747  void lru_add_drain(void)
   748  {
   749          local_lock(&lru_pvecs.lock);
   750          lru_add_drain_cpu(smp_processor_id());
   751          local_unlock(&lru_pvecs.lock);
   752  }
   753  
   754  void lru_add_drain_cpu_zone(struct zone *zone)
   755  {
   756          local_lock(&lru_pvecs.lock);
   757          lru_add_drain_cpu(smp_processor_id());
   758          drain_local_pages(zone);
   759          local_unlock(&lru_pvecs.lock);
   760  }
   761  
   762  #ifdef CONFIG_SMP
   763  
   764  static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
   765  
   766  static void lru_add_drain_per_cpu(struct work_struct *dummy)
   767  {
   768          lru_add_drain();
   769  }
   770  
   771  /*
   772   * Doesn't need any cpu hotplug locking because we do rely on per-cpu
   773   * kworkers being shut down before our page_alloc_cpu_dead callback is
   774   * executed on the offlined cpu.
   775   * Calling this function with cpu hotplug locks held can actually lead
   776   * to obscure indirect dependencies via WQ context.
   777   */
   778  void lru_add_drain_all(bool force_all_cpus)
   779  {
   780          /*
   781           * lru_drain_gen - Global pages generation number
   782           *
   783           * (A) Definition: global lru_drain_gen = x implies that all generations
   784           *     0 < n <= x are already *scheduled* for draining.
   785           *
   786           * This is an optimization for the highly-contended use case where a
   787           * user space workload keeps constantly generating a flow of pages for
   788           * each CPU.
   789           */
   790          static unsigned int lru_drain_gen;
   791          static struct cpumask has_work;
   792          static DEFINE_MUTEX(lock);
   793          unsigned cpu, this_gen;
   794  
   795          /*
   796           * Make sure nobody triggers this path before mm_percpu_wq is fully
   797           * initialized.
   798           */
   799          if (WARN_ON(!mm_percpu_wq))
   800                  return;
   801  
   802          /*
   803           * Guarantee pagevec counter stores visible by this CPU are visible to
   804           * other CPUs before loading the current drain generation.
   805           */
   806          smp_mb();
   807  
   808          /*
   809           * (B) Locally cache global LRU draining generation number
   810           *
   811           * The read barrier ensures that the counter is loaded before the mutex
   812           * is taken. It pairs with smp_mb() inside the mutex critical section
   813           * at (D).
   814           */
   815          this_gen = smp_load_acquire(&lru_drain_gen);
   816  
   817          mutex_lock(&lock);
   818  
   819          /*
   820           * (C) Exit the draining operation if a newer generation, from another
   821           * lru_add_drain_all(), was already scheduled for draining. Check (A).
   822           */
   823          if (unlikely(this_gen != lru_drain_gen))
   824                  goto done;
   825  
   826          /*
   827           * (D) Increment global generation number
   828           *
   829           * Pairs with smp_load_acquire() at (B), outside of the critical
   830           * section. Use a full memory barrier to guarantee that the new global
   831           * drain generation number is stored before loading pagevec counters.
   832           *
   833           * This pairing must be done here, before the for_each_online_cpu loop
   834           * below which drains the page vectors.
   835           *
   836           * Let x, y, and z represent some system CPU numbers, where x < y < z.
   837           * Assume CPU #z is is in the middle of the for_each_online_cpu loop
   838           * below and has already reached CPU #y's per-cpu data. CPU #x comes
   839           * along, adds some pages to its per-cpu vectors, then calls
   840           * lru_add_drain_all().
   841           *
   842           * If the paired barrier is done at any later step, e.g. after the
   843           * loop, CPU #x will just exit at (C) and miss flushing out all of its
   844           * added pages.
   845           */
   846          WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
   847          smp_mb();
   848  
   849          cpumask_clear(&has_work);
   850          for_each_online_cpu(cpu) {
   851                  struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
   852  
   853                  if (force_all_cpus ||
   854                      pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
   855                      data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
   856                      pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
   857                      pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
   858                      pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
   859                      need_activate_page_drain(cpu) ||
 > 860                      has_bh_in_lru(cpu, NULL)) {
   861                          INIT_WORK(work, lru_add_drain_per_cpu);
   862                          queue_work_on(cpu, mm_percpu_wq, work);
   863                          __cpumask_set_cpu(cpu, &has_work);
   864                  }
   865          }
   866  
   867          for_each_cpu(cpu, &has_work)
   868                  flush_work(&per_cpu(lru_add_drain_work, cpu));
   869  
   870  done:
   871          mutex_unlock(&lock);
   872  }
   873  #else
   874  void lru_add_drain_all(void)
   875  {
   876          lru_add_drain();
   877  }
   878  #endif /* CONFIG_SMP */
   879  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz
Description: application/gzip
