On Tue, Jun 21, 2016 at 03:29:49PM +0200, Vincent Guittot wrote:
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -692,6 +692,7 @@ void init_entity_runnable_average(struct
> >
> >  static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
> >  static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
> > +static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
> >  static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
> >
> >  /*
> > @@ -757,7 +758,8 @@ void post_init_entity_util_avg(struct sc
> >                 }
> >         }
> >
> > -       update_cfs_rq_load_avg(now, cfs_rq, false);
> > +       if (update_cfs_rq_load_avg(now, cfs_rq, false))
> > +               update_tg_load_avg(cfs_rq, false);
> 
> You should move update_tg_load_avg() after attach_entity_load_avg() to
> take the newly attached task into account.

Right you are, I've also updated the comment to reflect this.
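
For reference, a minimal sketch of how the tail of
post_init_entity_util_avg() would be ordered with that change (a sketch
of the fix under discussion, not necessarily the final committed hunk):

	update_cfs_rq_load_avg(now, cfs_rq, false);
	attach_entity_load_avg(cfs_rq, se);
	/*
	 * Update tg->load_avg only after the new entity has been
	 * attached, so its contribution is included.
	 */
	update_tg_load_avg(cfs_rq, false);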
