Rename sum_nr_running to sum_h_nr_running because it effectively tracks
rq->cfs.h_nr_running, which frees the sum_nr_running name to track
rq->nr_running when needed.

There are no functional changes.

Signed-off-by: Vincent Guittot <vincent.guit...@linaro.org>
---
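Note, not part of the commit message: the reason the old name is worth
freeing is that the two counters diverge as soon as non-CFS tasks are
runnable. rq->nr_running aggregates runnable tasks of all scheduling
classes, while rq->cfs.h_nr_running counts only CFS tasks across the
task-group hierarchy. A hypothetical helper (rq_non_cfs_running() does
not exist in the tree) sketching the distinction:

	/*
	 * Illustrative sketch only: number of tasks runnable on this rq
	 * in classes other than CFS. Relies on the approximate relation
	 * rq->nr_running ~= cfs.h_nr_running + rt.rt_nr_running +
	 * dl.dl_nr_running, which can be transiently off (e.g. while a
	 * cfs_rq is being throttled).
	 */
	static inline unsigned int rq_non_cfs_running(struct rq *rq)
	{
		return rq->nr_running - rq->cfs.h_nr_running;
	}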
 kernel/sched/fair.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3175fea..02ab6b5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7680,7 +7680,7 @@ struct sg_lb_stats {
        unsigned long load_per_task;
        unsigned long group_capacity;
        unsigned long group_util; /* Total utilization of the group */
-       unsigned int sum_nr_running; /* Nr tasks running in the group */
+       unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
        unsigned int idle_cpus;
        unsigned int group_weight;
        enum group_type group_type;
@@ -7725,7 +7725,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
                .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
-                       .sum_nr_running = 0,
+                       .sum_h_nr_running = 0,
                        .group_type = group_other,
                },
        };
@@ -7916,7 +7916,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 static inline bool
 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-       if (sgs->sum_nr_running < sgs->group_weight)
+       if (sgs->sum_h_nr_running < sgs->group_weight)
                return true;
 
        if ((sgs->group_capacity * 100) >
@@ -7937,7 +7937,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 static inline bool
 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-       if (sgs->sum_nr_running <= sgs->group_weight)
+       if (sgs->sum_h_nr_running <= sgs->group_weight)
                return false;
 
        if ((sgs->group_capacity * 100) <
@@ -8029,7 +8029,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                sgs->group_load += cpu_runnable_load(rq);
                sgs->group_util += cpu_util(i);
-               sgs->sum_nr_running += rq->cfs.h_nr_running;
+               sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 
                nr_running = rq->nr_running;
                if (nr_running > 1)
@@ -8059,8 +8059,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        sgs->group_capacity = group->sgc->capacity;
        sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-       if (sgs->sum_nr_running)
-               sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
+       if (sgs->sum_h_nr_running)
+               sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
 
        sgs->group_weight = group->group_weight;
 
@@ -8117,7 +8117,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         * capable CPUs may harm throughput. Maximize throughput,
         * power/energy consequences are not considered.
         */
-       if (sgs->sum_nr_running <= sgs->group_weight &&
+       if (sgs->sum_h_nr_running <= sgs->group_weight &&
            group_smaller_min_cpu_capacity(sds->local, sg))
                return false;
 
@@ -8148,7 +8148,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         * perform better since they share less core resources.  Hence when we
         * have idle threads, we want them to be the higher ones.
         */
-       if (sgs->sum_nr_running &&
+       if (sgs->sum_h_nr_running &&
            sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
                sgs->group_asym_packing = 1;
                if (!sds->busiest)
@@ -8166,9 +8166,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 #ifdef CONFIG_NUMA_BALANCING
 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 {
-       if (sgs->sum_nr_running > sgs->nr_numa_running)
+       if (sgs->sum_h_nr_running > sgs->nr_numa_running)
                return regular;
-       if (sgs->sum_nr_running > sgs->nr_preferred_running)
+       if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
                return remote;
        return all;
 }
@@ -8243,7 +8243,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                 */
                if (prefer_sibling && sds->local &&
                    group_has_capacity(env, local) &&
-                   (sgs->sum_nr_running > local->sum_nr_running + 1)) {
+                   (sgs->sum_h_nr_running > local->sum_h_nr_running + 1)) {
                        sgs->group_no_capacity = 1;
                        sgs->group_type = group_classify(sg, sgs);
                }
@@ -8255,7 +8255,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
 next_group:
                /* Now, start updating sd_lb_stats */
-               sds->total_running += sgs->sum_nr_running;
+               sds->total_running += sgs->sum_h_nr_running;
                sds->total_load += sgs->group_load;
                sds->total_capacity += sgs->group_capacity;
 
@@ -8309,7 +8309,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
        local = &sds->local_stat;
        busiest = &sds->busiest_stat;
 
-       if (!local->sum_nr_running)
+       if (!local->sum_h_nr_running)
                local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
        else if (busiest->load_per_task > local->load_per_task)
                imbn = 1;
@@ -8407,7 +8407,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         */
        if (busiest->group_type == group_overloaded &&
            local->group_type   == group_overloaded) {
-               load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
+               load_above_capacity = busiest->sum_h_nr_running * SCHED_CAPACITY_SCALE;
                if (load_above_capacity > busiest->group_capacity) {
                        load_above_capacity -= busiest->group_capacity;
                        load_above_capacity *= scale_load_down(NICE_0_LOAD);
@@ -8488,7 +8488,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
                goto force_balance;
 
        /* There is no busy sibling group to pull tasks from */
-       if (!sds.busiest || busiest->sum_nr_running == 0)
+       if (!sds.busiest || busiest->sum_h_nr_running == 0)
                goto out_balanced;
 
        /* XXX broken for overlapping NUMA groups */
-- 
2.7.4
