group_asym_packing

On Tue, 1 Oct 2019 at 10:15, Dietmar Eggemann <dietmar.eggem...@arm.com> wrote:
>
> On 19/09/2019 09:33, Vincent Guittot wrote:
>
>
> [...]
>
> > @@ -8042,14 +8104,24 @@ static inline void update_sg_lb_stats(struct lb_env 
> > *env,
> >               }
> >       }
> >
> > -     /* Adjust by relative CPU capacity of the group */
> > +     /* Check if dst cpu is idle and preferred to this group */
> > +     if (env->sd->flags & SD_ASYM_PACKING &&
> > +         env->idle != CPU_NOT_IDLE &&
> > +         sgs->sum_h_nr_running &&
> > +         sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
> > +             sgs->group_asym_packing = 1;
> > +     }
> > +
>
> Since asym_packing check is done per-sg rather per-CPU (like misfit_task), 
> can you
> not check for asym_packing in group_classify() directly (like for overloaded) 
> and
> so get rid of struct sg_lb_stats::group_asym_packing?

asym_packing uses a lot of fields of env to set group_asym_packing, but
env is not statistics, and I'd like to remove env from
group_classify()'s arguments so that it uses only statistics.
As of now, env is an argument of group_classify() only for its
imbalance_pct field, and I have replaced env with imbalance_pct in a
patch that is under preparation.
With this change, I can use group_classify() outside of load_balance().

>
> Something like (only compile tested).
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index d0c3aa1dc290..b2848d6f8a2a 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -7692,7 +7692,6 @@ struct sg_lb_stats {
>         unsigned int idle_cpus;
>         unsigned int group_weight;
>         enum group_type group_type;
> -       unsigned int group_asym_packing; /* Tasks should be moved to 
> preferred CPU */
>         unsigned long group_misfit_task_load; /* A CPU has a task too big for 
> its capacity */
>  #ifdef CONFIG_NUMA_BALANCING
>         unsigned int nr_numa_running;
> @@ -7952,6 +7951,20 @@ group_is_overloaded(struct lb_env *env, struct 
> sg_lb_stats *sgs)
>         return false;
>  }
>
> +static inline bool
> +group_has_asym_packing(struct lb_env *env, struct sched_group *sg,
> +                      struct sg_lb_stats *sgs)
> +{
> +       if (env->sd->flags & SD_ASYM_PACKING &&
> +           env->idle != CPU_NOT_IDLE &&
> +           sgs->sum_h_nr_running &&
> +           sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
> +               return true;
> +       }
> +
> +       return false;
> +}
> +
>  /*
>   * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
>   * per-CPU capacity than sched_group ref.
> @@ -7983,7 +7996,7 @@ group_type group_classify(struct lb_env *env,
>         if (sg_imbalanced(group))
>                 return group_imbalanced;
>
> -       if (sgs->group_asym_packing)
> +       if (group_has_asym_packing(env, group, sgs))
>                 return group_asym_packing;
>
>         if (sgs->group_misfit_task_load)
> @@ -8076,14 +8089,6 @@ static inline void update_sg_lb_stats(struct lb_env 
> *env,
>                 }
>         }
>
> -       /* Check if dst cpu is idle and preferred to this group */
> -       if (env->sd->flags & SD_ASYM_PACKING &&
> -           env->idle != CPU_NOT_IDLE &&
> -           sgs->sum_h_nr_running &&
> -           sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
> -               sgs->group_asym_packing = 1;
> -       }
> -
>         sgs->group_capacity = group->sgc->capacity;
>
> [...]

Reply via email to