Modify update_blocked_averages() and update_cfs_rq_h_load() so that they
do not access the next higher hierarchy level, for which they do not hold
a lock.

This will have to be revisited when load balancing is made functional.
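
For reference, a minimal sketch of the guard pattern this patch applies
in both functions. is_sd_se() comes from earlier in this series; it is
assumed here to return true once a hierarchy walk crosses from the
task-group part of the hierarchy into the scheduling-domain part, whose
runqueue lock the walker does not hold (the exact definition is not
shown in this patch):

	for_each_sched_entity(se) {
		if (is_sd_se(se))
			break;	/* parent rq lock is not held */
		/* per-level updates under the locks we do hold */
	}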

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
 kernel/sched/fair.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc219c9c3097..210fcd534917 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7686,7 +7686,7 @@ static void update_blocked_averages(int cpu)
 
                /* Propagate pending load changes to the parent, if any: */
                se = cfs_rq->my_se;
-               if (se && !skip_blocked_update(se))
+               if (se && !is_sd_se(se) && !skip_blocked_update(se))
                        update_load_avg(cfs_rq_of(se), se, 0);
 
                /*
@@ -7731,6 +7731,8 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 
        cfs_rq->h_load_next = NULL;
        for_each_sched_entity(se) {
+               if (is_sd_se(se))
+                       break;
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_load_next = se;
                if (cfs_rq->last_h_load_update == now)
-- 
2.9.3.1.gcba166c.dirty
