tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   dd0d718152e4c65b173070d48ea9dfc06894c3e5
commit: 0b0695f2b34a4afa3f6e9aa1ff0e5336d8dad912 sched/fair: Rework load_balance()
date:   8 months ago
config: x86_64-randconfig-r011-20200623 (attached as .config)
compiler: gcc-5 (Ubuntu 5.5.0-12ubuntu1) 5.5.0 20171010
reproduce (this is a W=1 build):
        git checkout 0b0695f2b34a4afa3f6e9aa1ff0e5336d8dad912
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <l...@intel.com>

All warnings (new ones prefixed by >>):

   In file included from arch/x86/include/asm/percpu.h:45:0,
                    from arch/x86/include/asm/current.h:6,
                    from include/linux/sched.h:12,
                    from kernel//sched/sched.h:5,
                    from kernel//sched/fair.c:23:
   kernel//sched/fair.c: In function 'calculate_imbalance':
   include/linux/kernel.h:942:63: warning: comparison is always false due to limited range of data type [-Wtype-limits]
    #define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
                                                                  ^
   include/linux/kernel.h:858:30: note: in definition of macro '__cmp'
    #define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
                                 ^
   include/linux/kernel.h:942:27: note: in expansion of macro '__careful_cmp'
    #define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
                              ^
>> kernel//sched/fair.c:8429:20: note: in expansion of macro 'max_t'
      env->imbalance = max_t(long, 0, (local->idle_cpus -
                       ^
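
A note on where the warning seems to come from (my reading, to be confirmed): sg_lb_stats::idle_cpus is an unsigned int, so the subtraction on line 8429 is evaluated in unsigned arithmetic before max_t() casts the result to long. The value compared against 0 can therefore never be negative, which is what -Wtype-limits is pointing out, and when busiest has more idle CPUs than local the wrapped difference becomes a huge positive imbalance instead of 0. A small stand-alone sketch of the pattern (simplified max_t(), made-up variable names):

/*
 * Stand-alone sketch of the warning pattern; assumes the idle_cpus
 * counters are unsigned int as in struct sg_lb_stats.  max_t() is
 * simplified here (no __careful_cmp type checking).
 */
#include <stdio.h>

#define __cmp(x, y, op)   ((x) op (y) ? (x) : (y))
#define max_t(type, x, y) __cmp((type)(x), (type)(y), >)

int main(void)
{
	unsigned int local_idle = 1, busiest_idle = 3;	/* busiest has more idle CPUs */
	long imbalance;

	/*
	 * 1 - 3 is computed as unsigned int and wraps to 0xfffffffe, so the
	 * value cast to long is always >= 0: the "max with 0" clamp can never
	 * fire, and instead of 0 we get a huge positive imbalance.
	 */
	imbalance = max_t(long, 0, (local_idle - busiest_idle) >> 1);
	printf("imbalance = %ld\n", imbalance);		/* prints 2147483647 */
	return 0;
}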

vim +/max_t +8429 kernel//sched/fair.c

  8336  
  8337  /**
  8338   * calculate_imbalance - Calculate the amount of imbalance present within the
  8339   *                       groups of a given sched_domain during load balance.
  8340   * @env: load balance environment
  8341   * @sds: statistics of the sched_domain whose imbalance is to be calculated.
  8342   */
  8343  static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  8344  {
  8345          struct sg_lb_stats *local, *busiest;
  8346  
  8347          local = &sds->local_stat;
  8348          busiest = &sds->busiest_stat;
  8349  
  8350          if (busiest->group_type == group_misfit_task) {
  8351                  /* Set imbalance to allow misfit tasks to be balanced. */
  8352                  env->migration_type = migrate_misfit;
  8353                  env->imbalance = busiest->group_misfit_task_load;
  8354                  return;
  8355          }
  8356  
  8357          if (busiest->group_type == group_asym_packing) {
  8358                  /*
  8359                   * In case of asym capacity, we will try to migrate all load to
  8360                   * the preferred CPU.
  8361                   */
  8362                  env->migration_type = migrate_task;
  8363                  env->imbalance = busiest->sum_h_nr_running;
  8364                  return;
  8365          }
  8366  
  8367          if (busiest->group_type == group_imbalanced) {
  8368                  /*
  8369                   * In the group_imb case we cannot rely on group-wide averages
  8370                   * to ensure CPU-load equilibrium, try to move any task to fix
  8371                   * the imbalance. The next load balance will take care of
  8372                   * balancing back the system.
  8373                   */
  8374                  env->migration_type = migrate_task;
  8375                  env->imbalance = 1;
  8376                  return;
  8377          }
  8378  
  8379          /*
  8380           * Try to use spare capacity of local group without overloading it or
  8381           * emptying busiest
  8382           */
  8383          if (local->group_type == group_has_spare) {
  8384                  if (busiest->group_type > group_fully_busy) {
  8385                          /*
  8386                           * If busiest is overloaded, try to fill spare
  8387                           * capacity. This might end up creating spare capacity
  8388                           * in busiest or busiest still being overloaded but
  8389                           * there is no simple way to directly compute the
  8390                           * amount of load to migrate in order to balance the
  8391                           * system.
  8392                           */
  8393                          env->migration_type = migrate_util;
  8394                          env->imbalance = max(local->group_capacity, local->group_util) -
  8395                                           local->group_util;
  8396  
  8397                          /*
  8398                           * In some cases, the group's utilization is max or even
  8399                           * higher than capacity because of migrations but the
  8400                           * local CPU is (newly) idle. There is at least one
  8401                           * waiting task in this overloaded busiest group. Let's
  8402                           * try to pull it.
  8403                           */
  8404                          if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
  8405                                  env->migration_type = migrate_task;
  8406                                  env->imbalance = 1;
  8407                          }
  8408  
  8409                          return;
  8410                  }
  8411  
  8412                  if (busiest->group_weight == 1 || sds->prefer_sibling) {
  8413                          unsigned int nr_diff = busiest->sum_h_nr_running;
  8414                          /*
  8415                           * When prefer sibling, evenly spread running tasks on
  8416                           * groups.
  8417                           */
  8418                          env->migration_type = migrate_task;
  8419                          lsub_positive(&nr_diff, local->sum_h_nr_running);
  8420                          env->imbalance = nr_diff >> 1;
  8421                          return;
  8422                  }
  8423  
  8424                  /*
  8425                   * If there is no overload, we just want to even the number of
  8426                   * idle cpus.
  8427                   */
  8428                  env->migration_type = migrate_task;
> 8429                  env->imbalance = max_t(long, 0, (local->idle_cpus -
  8430                                                   busiest->idle_cpus) >> 1);
  8431                  return;
  8432          }
  8433  
  8434          /*
  8435           * Local is fully busy but has to take more load to relieve the
  8436           * busiest group
  8437           */
  8438          if (local->group_type < group_overloaded) {
  8439                  /*
  8440                   * Local will become overloaded so the avg_load metrics are
  8441                   * finally needed.
  8442                   */
  8443  
  8444                  local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
  8445                                    local->group_capacity;
  8446  
  8447                  sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
  8448                                  sds->total_capacity;
  8449          }
  8450  
  8451          /*
  8452           * Both group are or will become overloaded and we're trying to get all
  8453           * the CPUs to the average_load, so we don't want to push ourselves
  8454           * above the average load, nor do we wish to reduce the max loaded CPU
  8455           * below the average load. At the same time, we also don't want to
  8456           * reduce the group load below the group capacity. Thus we look for
  8457           * the minimum possible imbalance.
  8458           */
  8459          env->migration_type = migrate_load;
  8460          env->imbalance = min(
  8461                  (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
  8462                  (sds->avg_load - local->avg_load) * local->group_capacity
  8463          ) / SCHED_CAPACITY_SCALE;
  8464  }
  8465  
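
One possible way to keep the clamp at line 8429 meaningful, sketched below against the same structures (only an illustration, not necessarily how the warning was or should be resolved upstream): compare the two counters before subtracting, so the unsigned difference can never wrap.

	/*
	 * Sketch of a possible rework (not necessarily the upstream fix):
	 * only subtract when local actually has more idle CPUs, so the
	 * unsigned arithmetic cannot wrap and no clamp is needed.
	 */
	env->migration_type = migrate_task;
	if (local->idle_cpus > busiest->idle_cpus)
		env->imbalance = (local->idle_cpus - busiest->idle_cpus) >> 1;
	else
		env->imbalance = 0;
	return;

Casting both counters to long before subtracting would avoid the wrap as well; either way the >> 1 then operates on a value that is known to be non-negative.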

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz
Description: application/gzip
