Now that load_idx is gone, source_load() is nothing but weighted_cpuload(),
so replace it with weighted_cpuload() directly and drop one redundant
concept from the cpu load code.
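
For reference, the helper removed below was already a trivial one-liner:

	static unsigned long source_load(int cpu)
	{
		return weighted_cpuload(cpu);
	}

so the two remaining callers, find_idlest_group() and update_sg_lb_stats(),
can use weighted_cpuload() directly.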

Signed-off-by: Alex Shi <alex....@linaro.org>
---
 kernel/sched/fair.c | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d91d925..66fa69b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1015,7 +1015,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 }
 
 static unsigned long weighted_cpuload(const int cpu);
-static unsigned long source_load(int cpu);
 static unsigned long target_load(int cpu, int imbalance_pct);
 static unsigned long power_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
@@ -3939,27 +3938,15 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
-/* Used instead of source_load when we know the type == 0 */
+/* CFS class cpu load, weighted by runnable time and 'nice' value */
 static unsigned long weighted_cpuload(const int cpu)
 {
        return cpu_rq(cpu)->cfs.runnable_load_avg;
 }
 
 /*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu)
-{
-       return weighted_cpuload(cpu);
-}
-
-/*
  * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
+ * according to the runnable time and "nice" value.
  */
 static unsigned long target_load(int cpu, int imbalance_pct)
 {
@@ -4287,7 +4274,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                for_each_cpu(i, sched_group_cpus(group)) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
-                               load = source_load(i);
+                               load = weighted_cpuload(i);
                        else
                                load = target_load(i, imbalance);
 
@@ -5752,7 +5739,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                if (local_group && env->idle != CPU_IDLE)
                        load = target_load(i, bias);
                else
-                       load = source_load(i);
+                       load = weighted_cpuload(i);
 
                sgs->group_load += load;
                sgs->sum_nr_running += rq->nr_running;
-- 
1.8.1.2
