Hi,

On Thu, Aug 13, 2015 at 02:55:55PM +0900, byungchul.p...@lge.com wrote:
> +static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +{
> +     se->avg.last_update_time = cfs_rq->avg.last_update_time;
> +     cfs_rq->avg.load_avg += se->avg.load_avg;
> +     cfs_rq->avg.load_sum += se->avg.load_sum;
> +     cfs_rq->avg.util_avg += se->avg.util_avg;
> +     cfs_rq->avg.util_sum += se->avg.util_sum;
> +}

I see this function is also used in enqueue_entity_load_avg().
In the tip tree code, enqueue_entity_load_avg() uses cfs_rq_clock_task()
as se->last_update_time in the migration case.
Here, you use cfs_rq->avg.last_update_time as se->last_update_time instead.
If se->last_update_time differs, the next decay may differ too.
Just from inspecting the code, is there some reasoning behind this choice?
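
To make the concern concrete, here is a rough stand-alone sketch (not
kernel code: decay_sum() and the constants are made up, and the shift is
only a coarse stand-in for the real per-period multiplication by y with
y^32 = 1/2) of how the starting timestamp feeds the next decay:

#include <stdio.h>
#include <stdint.h>

/* Coarse model: one decay period per ~1024us, halving every 32 periods. */
static uint64_t decay_sum(uint64_t sum, uint64_t now_ns, uint64_t last_ns)
{
	uint64_t periods = (now_ns - last_ns) >> 20;	/* ~1024us periods */

	return sum >> (periods / 32);	/* y^32 = 1/2, modeled as a shift */
}

int main(void)
{
	uint64_t now = 64ULL << 20;	/* 64 periods after time zero */

	/* Same sums, two candidate last_update_time values: */
	printf("older timestamp: %llu\n",
	       (unsigned long long)decay_sum(1024, now, 0));
	printf("newer timestamp: %llu\n",
	       (unsigned long long)decay_sum(1024, now, 32ULL << 20));
	return 0;
}

The two timestamps decay the same sum to 256 and 512 respectively, so
picking cfs_rq->avg.last_update_time rather than the value the tip tree
would have used changes how far the entity's sums are decayed at the
next __update_load_avg().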

Thanks
> +static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +{
> +     __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
> +                     &se->avg, se->on_rq * scale_load_down(se->load.weight),
> +                     cfs_rq->curr == se, NULL);
> +
> +     cfs_rq->avg.load_avg =
> +             max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
> +     cfs_rq->avg.load_sum =
> +             max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
> +     cfs_rq->avg.util_avg =
> +             max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
> +     cfs_rq->avg.util_sum =
> +             max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
> +}
> +
>  /* Add the load generated by se into cfs_rq's load average */
>  static inline void
>  enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
> @@ -2717,27 +2742,20 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
>       u64 now = cfs_rq_clock_task(cfs_rq);
>       int migrated = 0, decayed;
>  
> -     if (sa->last_update_time == 0) {
> -             sa->last_update_time = now;
> +     if (sa->last_update_time == 0)
>               migrated = 1;
> -     }
> -     else {
> +     else
>               __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
> -                     se->on_rq * scale_load_down(se->load.weight),
> -                     cfs_rq->curr == se, NULL);
> -     }
> +                             se->on_rq * scale_load_down(se->load.weight),
> +                             cfs_rq->curr == se, NULL);
>  
>       decayed = update_cfs_rq_load_avg(now, cfs_rq);
>  
>       cfs_rq->runnable_load_avg += sa->load_avg;
>       cfs_rq->runnable_load_sum += sa->load_sum;
>  
> -     if (migrated) {
> -             cfs_rq->avg.load_avg += sa->load_avg;
> -             cfs_rq->avg.load_sum += sa->load_sum;
> -             cfs_rq->avg.util_avg += sa->util_avg;
> -             cfs_rq->avg.util_sum += sa->util_sum;
> -     }
> +     if (migrated)
> +             attach_entity_load_avg(cfs_rq, se);
>  
>       if (decayed || migrated)
>               update_tg_load_avg(cfs_rq, 0);
> @@ -7911,17 +7929,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
>  
>  #ifdef CONFIG_SMP
>       /* Catch up with the cfs_rq and remove our load when we leave */
> -     __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
> -             se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
> -
> -     cfs_rq->avg.load_avg =
> -             max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
> -     cfs_rq->avg.load_sum =
> -             max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
> -     cfs_rq->avg.util_avg =
> -             max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
> -     cfs_rq->avg.util_sum =
> -             max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
> +     detach_entity_load_avg(cfs_rq, se);
>  #endif
>  }
>  
> @@ -7938,6 +7946,11 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
>        */
>       se->depth = se->parent ? se->parent->depth + 1 : 0;
>  #endif
> +
> +#ifdef CONFIG_SMP
> +     /* synchronize task with its cfs_rq */
> +     attach_entity_load_avg(cfs_rq_of(&p->se), &p->se);
> +#endif
>       if (!task_on_rq_queued(p))
>               return;
>  
> @@ -8023,16 +8036,7 @@ static void task_move_group_fair(struct task_struct *p, int queued)
>  
>  #ifdef CONFIG_SMP
>       /* synchronize task with its prev cfs_rq */
> -     if (!queued)
> -             __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
> -                             &se->avg, se->on_rq * scale_load_down(se->load.weight),
> -                             cfs_rq->curr == se, NULL);
> -
> -     /* remove our load when we leave */
> -     cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
> -     cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
> -     cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
> -     cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
> +     detach_entity_load_avg(cfs_rq, se);
>  #endif
>       set_task_rq(p, task_cpu(p));
>       se->depth = se->parent ? se->parent->depth + 1 : 0;
> @@ -8042,11 +8046,7 @@ static void task_move_group_fair(struct task_struct *p, int queued)
>  
>  #ifdef CONFIG_SMP
>       /* Virtually synchronize task with its new cfs_rq */
> -     p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
> -     cfs_rq->avg.load_avg += p->se.avg.load_avg;
> -     cfs_rq->avg.load_sum += p->se.avg.load_sum;
> -     cfs_rq->avg.util_avg += p->se.avg.util_avg;
> -     cfs_rq->avg.util_sum += p->se.avg.util_sum;
> +     attach_entity_load_avg(cfs_rq, se);
>  #endif
>  }

-- 
Tao