On Tue, Dec 05, 2017 at 05:10:16PM +0000, Patrick Bellasi wrote:
> @@ -562,6 +577,12 @@ struct task_struct {
>  
>       const struct sched_class        *sched_class;
>       struct sched_entity             se;
> +     /*
> +      * Since we use se.avg.util_avg to update util_est fields,
> +      * the latter benefits from being close to se, which
> +      * also defines se.avg as cache aligned.
> +      */
> +     struct util_est                 util_est;
>       struct sched_rt_entity          rt;
>  #ifdef CONFIG_CGROUP_SCHED
>       struct task_group               *sched_task_group;


> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index b19552a212de..8371839075fa 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -444,6 +444,7 @@ struct cfs_rq {
>        * CFS load tracking
>        */
>       struct sched_avg avg;
> +     unsigned long util_est_runnable;
>  #ifndef CONFIG_64BIT
>       u64 load_last_update_time_copy;
>  #endif


So you put util_est in task_struct (not sched_entity), but
util_est_runnable in cfs_rq (not rq). That seems inconsistent.
