Hello, Peter.

Your changes need the following fix patch.  With the fix and
"sched/fair: Always propagate runnable_load_avg" applied, it seems to
work fine.  The propagated number is a bit different, but I don't see
a noticeable difference in behavior, and the new number seems to better
represent what we need.

Thanks.
---
 kernel/sched/fair.c |    7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2645,7 +2645,8 @@ enum shares_type {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
 static long
-calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, enum shares_type)
+calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+               enum shares_type shares_type)
 {
        long tg_weight, tg_shares, load, shares;
 
@@ -2705,7 +2706,7 @@ calc_cfs_shares(struct cfs_rq *cfs_rq, s
 }
 # else /* CONFIG_SMP */
 static inline long
-calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, enum shares_type)
+calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg, enum shares_type shares_type)
 {
        return tg->shares;
 }
@@ -3104,7 +3105,7 @@ static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct cfs_rq *gcfs_rq = group_cfs_rq(se);
-       long delta, load = calc_cfs_shares(gcfs_rq, gcfs_rq->tg, shares_runnable);
+       long delta, load = scale_load_down(calc_cfs_shares(gcfs_rq, gcfs_rq->tg, shares_runnable));
 
        delta = load - se->avg.load_avg;
 

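(Not part of the patch, just in case the changed number is surprising: on
64-bit kernels tg->shares lives in the scale_load()'d high-resolution
domain while se->avg.load_avg does not, so the calc_cfs_shares() result
has to be brought back down before computing the delta.  A standalone
sketch, assuming the 64-bit definitions from kernel/sched/sched.h of that
era; treat the macro values as assumptions, not quotes from the patch.)

/*
 * Standalone sketch, not kernel code: why the extra scale_load_down()
 * shifts the propagated number.
 */
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT	10
#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)	/* weight -> high-res domain */
#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)	/* high-res -> load_avg domain */

int main(void)
{
	/* tg->shares is stored scaled up, e.g. the default 1024 becomes 1048576 */
	long tg_shares = scale_load(1024);

	/*
	 * calc_cfs_shares() returns a value in the same domain as tg->shares,
	 * while se->avg.load_avg is in the scaled-down domain, so the result
	 * is shifted back down before computing delta.
	 */
	long load = scale_load_down(tg_shares);

	printf("tg_shares=%ld load=%ld\n", tg_shares, load);
	return 0;
}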