Lower the lower limit of the idle cpu search in select_idle_cpu() and also
put an upper limit. This helps the scalability of the search by restricting
the search window. Rotating the search window with the help of next_cpu
also ensures that an idle cpu is eventually found under high load.

Signed-off-by: subhra mazumdar <subhra.mazum...@oracle.com>
---
 kernel/sched/fair.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
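
Note: the second hunk relies on a per-cpu rotor, next_cpu, that is defined
and initialized to -1 elsewhere in this series. A minimal sketch of what
that could look like; the helper name, placement and init site below are
assumptions, not part of this patch:

	/* e.g. in kernel/sched/core.c -- placement is an assumption */
	DEFINE_PER_CPU(int, next_cpu);

	/* hypothetical init helper, e.g. called from sched_init() */
	static void __init init_select_idle_rotor(void)
	{
		int i;

		for_each_possible_cpu(i)
			per_cpu(next_cpu, i) = -1;	/* -1: rotor not yet positioned */
	}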

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d1d4769..62d585b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6279,7 +6279,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        u64 avg_cost, avg_idle;
        u64 time, cost;
        s64 delta;
-       int cpu, nr = INT_MAX;
+       int cpu, target_tmp, nr = INT_MAX;
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -6297,15 +6297,24 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
        if (sched_feat(SIS_PROP)) {
                u64 span_avg = sd->span_weight * avg_idle;
-               if (span_avg > 4*avg_cost)
+               if (span_avg > 2*avg_cost) {
                        nr = div_u64(span_avg, avg_cost);
-               else
-                       nr = 4;
+                       if (nr > 4)
+                               nr = 4;
+               } else {
+                       nr = 2;
+               }
        }
 
+       if (per_cpu(next_cpu, target) != -1)
+               target_tmp = per_cpu(next_cpu, target);
+       else
+               target_tmp = target;
+
        time = local_clock();
 
-       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+       for_each_cpu_wrap(cpu, sched_domain_span(sd), target_tmp) {
+               per_cpu(next_cpu, target) = cpu;
                if (!--nr)
                        return -1;
                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
-- 
2.9.3
