Hi Patrick,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on next-20180604]
[cannot apply to v4.17]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Patrick-Bellasi/sched-fair-pelt-use-u32-for-util_avg/20180605-082640
config: i386-randconfig-a1-06041847 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.4-2) 4.9.4
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   kernel/sched/fair.c: In function 'enqueue_task_fair':
>> kernel/sched/fair.c:5450:2: error: implicit declaration of function 'util_est_enqueue_running' [-Werror=implicit-function-declaration]
     util_est_enqueue_running(p);
     ^
   cc1: some warnings being treated as errors
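
Note: an implicit-declaration error on an i386 randconfig like this one usually means the new helper is only defined inside a config-guarded region (most likely CONFIG_SMP, where the rest of the util_est code in kernel/sched/fair.c lives) while the call site in enqueue_task_fair() is built unconditionally. A minimal sketch of that assumed layout; only the function name comes from the report, the #ifdef placement is an assumption:

        /* Sketch of the assumed layout, not the actual patch contents. */
        #ifdef CONFIG_SMP
        static inline void util_est_enqueue_running(struct task_struct *p)
        {
                /* util_est bookkeeping, compiled only when CONFIG_SMP=y */
        }
        #endif /* CONFIG_SMP */

        /*
         * On a !CONFIG_SMP build such as this randconfig, the definition
         * above is compiled out, so the call in enqueue_task_fair() has no
         * visible prototype and gcc turns the warning into an error under
         * -Werror.
         */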

vim +/util_est_enqueue_running +5450 kernel/sched/fair.c

  5389  
  5390  /*
  5391   * The enqueue_task method is called before nr_running is
  5392   * increased. Here we update the fair scheduling stats and
  5393   * then put the task into the rbtree:
  5394   */
  5395  static void
  5396  enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  5397  {
  5398          struct cfs_rq *cfs_rq;
  5399          struct sched_entity *se = &p->se;
  5400  
  5401          /*
  5402           * The code below (indirectly) updates schedutil which looks at
  5403           * the cfs_rq utilization to select a frequency.
  5404           * Let's add the task's estimated utilization to the cfs_rq's
  5405           * estimated utilization, before we update schedutil.
  5406           */
  5407          util_est_enqueue(&rq->cfs, p);
  5408  
  5409          /*
  5410           * If in_iowait is set, the code below may not trigger any cpufreq
  5411           * utilization updates, so do it here explicitly with the IOWAIT flag
  5412           * passed.
  5413           */
  5414          if (p->in_iowait)
  5415                  cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
  5416  
  5417          for_each_sched_entity(se) {
  5418                  if (se->on_rq)
  5419                          break;
  5420                  cfs_rq = cfs_rq_of(se);
  5421                  enqueue_entity(cfs_rq, se, flags);
  5422  
  5423                  /*
  5424                   * end evaluation on encountering a throttled cfs_rq
  5425                   *
  5426                   * note: in the case of encountering a throttled cfs_rq we will
  5427                   * post the final h_nr_running increment below.
  5428                   */
  5429                  if (cfs_rq_throttled(cfs_rq))
  5430                          break;
  5431                  cfs_rq->h_nr_running++;
  5432  
  5433                  flags = ENQUEUE_WAKEUP;
  5434          }
  5435  
  5436          for_each_sched_entity(se) {
  5437                  cfs_rq = cfs_rq_of(se);
  5438                  cfs_rq->h_nr_running++;
  5439  
  5440                  if (cfs_rq_throttled(cfs_rq))
  5441                          break;
  5442  
  5443                  update_load_avg(cfs_rq, se, UPDATE_TG);
  5444                  update_cfs_group(se);
  5445          }
  5446  
  5447          if (!se)
  5448                  add_nr_running(rq, 1);
  5449  
> 5450          util_est_enqueue_running(p);
  5451  
  5452          hrtick_update(rq);
  5453  }
  5454  
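
If the missing definition is indeed hidden behind CONFIG_SMP, the usual fix is to provide an empty !CONFIG_SMP stub next to the existing util_est helpers, so the unconditional call above builds in every configuration. A sketch of that pattern, assuming the helper lives with the other util_est code in kernel/sched/fair.c:

        #ifdef CONFIG_SMP
        static inline void util_est_enqueue_running(struct task_struct *p)
        {
                /* real implementation, only built for SMP configs */
        }
        #else /* !CONFIG_SMP */
        static inline void util_est_enqueue_running(struct task_struct *p) {}
        #endif /* CONFIG_SMP */

Wrapping the call site in enqueue_task_fair() with #ifdef CONFIG_SMP would also silence the error, but the empty stub matches how util_est_enqueue() itself is handled for !CONFIG_SMP builds.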

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip
