At present, the scheduler resets a task's wait start timestamp when the
task migrates to another rq.  This misleads the scheduler itself into
reporting less wait time than actual, because the time spent waiting
prior to migration is dropped, and a higher wait count than actual,
because each migration is counted as a wait end event.  The error can
be seen via trace or /proc/<pid>/sched with CONFIG_SCHEDSTATS=y.

Carry a migrating task's wait time accrued prior to migration forward
and do not count the migration as a wait end event, to fix these
statistics errors.

In order to determine whether a task is migrating, mark task->on_rq
with TASK_ON_RQ_MIGRATING while it is dequeued and enqueued due to
migration.
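
The carry-forward works in two steps: the wait end triggered by a
migration stashes the accrued wait delta in wait_start, and the next
wait start on the destination rq backdates itself by that delta.  A
minimal standalone sketch of that arithmetic follows; the names
(struct wait_stats, wait_end(), wait_begin(), the explicit 'migrating'
flag) are illustrative only, while the real logic lives in
update_stats_wait_start()/update_stats_wait_end() below and uses
task_on_rq_migrating() and schedstat_set():

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdio.h>

typedef unsigned long long u64;

struct wait_stats {
	u64 wait_start;		/* timestamp, or an accrued delta while migrating */
	u64 wait_sum;
	u64 wait_count;
};

/* Wait ends at time 'now'.  If the task is merely migrating, stash the
 * accrued delta instead of folding it into wait_sum/wait_count. */
static void wait_end(struct wait_stats *ws, u64 now, int migrating)
{
	if (migrating) {
		ws->wait_start = now - ws->wait_start;	/* accrued delta */
		return;
	}
	ws->wait_sum += now - ws->wait_start;
	ws->wait_count++;
	ws->wait_start = 0;
}

/* Wait (re)starts at time 'now'.  If the task just migrated, wait_start
 * holds a delta, so backdate the new start by that amount. */
static void wait_begin(struct wait_stats *ws, u64 now, int migrating)
{
	ws->wait_start = (migrating && now > ws->wait_start) ?
			 now - ws->wait_start : now;
}

int main(void)
{
	struct wait_stats ws = { .wait_start = 100 };	/* began waiting at t=100 */

	wait_end(&ws, 150, 1);		/* dequeued for migration at t=150 */
	wait_begin(&ws, 160, 1);	/* enqueued on the destination rq at t=160 */
	wait_end(&ws, 200, 0);		/* finally picked to run at t=200 */

	/* Prints wait_sum=90 wait_count=1: the 50 units accrued before the
	 * migration are preserved and only one wait end event is counted. */
	printf("wait_sum=%llu wait_count=%llu\n", ws.wait_sum, ws.wait_count);
	return 0;
}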

To: Ingo Molnar <mi...@kernel.org>
To: Peter Zijlstra <pet...@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Joonwoo Park <joonw...@codeaurora.org>
---
Changes in v2:
 * Set p->on_rq = TASK_ON_RQ_MIGRATING while doing the migration dequeue/enqueue
   and check whether the task is migrating with task_on_rq_migrating().
Changes in v3:
 * Fixed "WARNING: CPU: 0 PID: 3 at kernel/sched/fair.c:260
   update_stats_wait_end+0x23/0x30()" caught by the Intel kernel test robot.

 kernel/sched/core.c |  4 ++--
 kernel/sched/fair.c | 27 ++++++++++++++++++++++-----
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bcd214e..d9e4ad5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1069,8 +1069,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
 {
        lockdep_assert_held(&rq->lock);
 
-       dequeue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
+       dequeue_task(rq, p, 0);
        set_task_cpu(p, new_cpu);
        raw_spin_unlock(&rq->lock);
 
@@ -1078,8 +1078,8 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
 
        raw_spin_lock(&rq->lock);
        BUG_ON(task_cpu(p) != new_cpu);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        enqueue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);
 
        return rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9a5e60f..4c174a1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -740,7 +740,11 @@ static void update_curr_fair(struct rq *rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
+       schedstat_set(se->statistics.wait_start,
+               entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+               likely(rq_clock(rq_of(cfs_rq)) > se->statistics.wait_start) ?
+               rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start :
+               rq_clock(rq_of(cfs_rq)));
 }
 
 /*
@@ -756,22 +760,35 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
                update_stats_wait_start(cfs_rq, se);
 }
 
+#ifdef CONFIG_SCHEDSTATS
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+       if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) {
+               schedstat_set(se->statistics.wait_start,
+                             rq_clock(rq_of(cfs_rq)) -
+                             se->statistics.wait_start);
+               return;
+       }
+
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
+
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
-#endif
        schedstat_set(se->statistics.wait_start, 0);
 }
+#else
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
 
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -5656,8 +5673,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
        lockdep_assert_held(&env->src_rq->lock);
 
-       deactivate_task(env->src_rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
+       deactivate_task(env->src_rq, p, 0);
        set_task_cpu(p, env->dst_cpu);
 }
 
@@ -5790,8 +5807,8 @@ static void attach_task(struct rq *rq, struct task_struct *p)
        lockdep_assert_held(&rq->lock);
 
        BUG_ON(task_rq(p) != rq);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        activate_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);
 }
 
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation
