On 10/26/18 6:11 PM, Vincent Guittot wrote:

[...]

  static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  static unsigned long task_h_load(struct task_struct *p);
@@ -764,7 +763,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
                         * such that the next switched_to_fair() has the
                         * expected state.
                         */
-                       se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
+                       se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
                        return;
                }
        }

There is this 1/cpu capacity scaling of se->avg.util_sum (running_sum) in update_tg_cfs_runnable(), so that it can be used to calculate se->avg.runnable_load_sum (runnable_sum). I guess that with your approach this scaling should be removed.
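
For reference, the rescaling I mean looks roughly like this (a paraphrased sketch of the current update_tg_cfs_runnable() code from memory, not a verbatim copy):

        /*
         * running_sum (se->avg.util_sum) is scaled with CPU capacity
         * whereas runnable_sum is not, so util_sum is divided by the
         * CPU capacity before the two can be compared.
         */
        running_sum = se->avg.util_sum /
                        arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
        runnable_sum = max(runnable_sum, running_sum);

If with your approach util_sum and runnable_load_sum end up in the same range, this division would no longer be needed.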

@@ -3466,7 +3465,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  /* Update task and its cfs_rq load average */
  static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity 
*se, int flags)
  {
-       u64 now = cfs_rq_clock_task(cfs_rq);
+       u64 now = cfs_rq_clock_pelt(cfs_rq);
        struct rq *rq = rq_of(cfs_rq);
        int cpu = cpu_of(rq);
        int decayed;
@@ -6694,6 +6693,12 @@ done: __maybe_unused;
        if (new_tasks > 0)
                goto again;
+       /*
+        * rq is about to be idle, check if we need to update the
+        * lost_idle_time of clock_pelt
+        */
+       update_idle_rq_clock_pelt(rq);
+
        return NULL;
  }

[...]
