Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/fair.c |   15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e7153ff..65f1651 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4365,9 +4365,16 @@ static int select_idle_sibling(struct task_struct *p, int target)
        struct sched_domain *sd;
        struct sched_group *sg;
        int i = task_cpu(p);
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+       int ret;
 
+       ret = workload_consolidation_wakeup(i, target);
+       if (ret < nr_cpu_ids)
+               return ret;
+#else
        if (idle_cpu(target))
                return target;
+#endif
 
        /*
         * If the prevous cpu is cache affine and idle, don't be stupid.
@@ -4460,7 +4467,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        }
 
        while (sd) {
-               struct sched_group *group;
+               struct sched_group *group = NULL;
                int weight;
 
                if (!(sd->flags & sd_flag)) {
@@ -4468,6 +4475,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        continue;
                }
 
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+               if (sd->flags & SD_ASYM_CONCURRENCY)
+                       group = workload_consolidation_find_group(sd, p, cpu);
+
+               if (!group)
+#endif
                group = find_idlest_group(sd, p, cpu, sd_flag);
                if (!group) {
                        sd = sd->child;
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to