Commit-ID:  f1d88b4468188ddcd2620b8d612068faf6662a62
Gitweb:     https://git.kernel.org/tip/f1d88b4468188ddcd2620b8d612068faf6662a62
Author:     Viresh Kumar <viresh.ku...@linaro.org>
AuthorDate: Thu, 26 Apr 2018 16:00:50 +0530
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 4 May 2018 10:00:07 +0200

sched/fair: Rearrange select_task_rq_fair() to optimize it

Rearrange select_task_rq_fair() a bit to avoid executing some
conditional statements in a few specific code paths. That also gets
rid of the goto.

This shouldn't result in any functional changes.
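
To illustrate the shape of the change outside the scheduler context, here is a
minimal standalone C sketch of the same restructuring; pick_target(),
affine_target(), slow_path() and fast_path() are hypothetical stand-ins, not
kernel APIs:

/*
 * Minimal sketch: do the affine work where the matching domain is
 * found, clear the slow-path pointer there, then choose the slow or
 * fast path with a plain if/else instead of a goto.
 */
#include <stdio.h>

struct domain { int flags; struct domain *parent; };

#define DOM_AFFINE  0x1
#define DOM_BALANCE 0x2

static int affine_target(struct domain *d) { (void)d; return 1; }
static int slow_path(struct domain *d)     { (void)d; return 2; }
static int fast_path(int hint)             { return hint; }

static int pick_target(struct domain *top, int want_affine)
{
        struct domain *tmp, *sd = NULL;
        int target = 0;

        for (tmp = top; tmp; tmp = tmp->parent) {
                if (want_affine && (tmp->flags & DOM_AFFINE)) {
                        target = affine_target(tmp); /* like wake_affine() */
                        sd = NULL; /* prefer the affine target over balance flags */
                        break;
                }
                if (tmp->flags & DOM_BALANCE)
                        sd = tmp;
                else
                        break;
        }

        if (sd)                 /* slow path */
                target = slow_path(sd);
        else                    /* fast path */
                target = fast_path(target);

        return target;
}

int main(void)
{
        struct domain leaf = { DOM_AFFINE, NULL };

        printf("target: %d\n", pick_target(&leaf, 1));
        return 0;
}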

Tested-by: Rohit Jain <rohit.k.j...@oracle.com>
Signed-off-by: Viresh Kumar <viresh.ku...@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schnei...@arm.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Vincent Guittot <vincent.guit...@linaro.org>
Link: http://lkml.kernel.org/r/20831b8d237bf3a20e4e328286f678b425ff04c9.1524738578.git.viresh.ku...@linaro.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/sched/fair.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e3002e5ada31..4b346f358005 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 {
-       struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
+       struct sched_domain *tmp, *sd = NULL;
        int cpu = smp_processor_id();
        int new_cpu = prev_cpu;
        int want_affine = 0;
@@ -6636,7 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                 */
                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
-                       affine_sd = tmp;
+                       if (cpu != prev_cpu)
+                               new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
+
+                       sd = NULL; /* Prefer wake_affine over balance flags */
                        break;
                }
 
@@ -6646,33 +6649,25 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        break;
        }
 
-       if (affine_sd) {
-               sd = NULL; /* Prefer wake_affine over balance flags */
-               if (cpu == prev_cpu)
-                       goto pick_cpu;
-
-               new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync);
-       }
+       if (unlikely(sd)) {
+               /* Slow path */
 
-       if (sd && !(sd_flag & SD_BALANCE_FORK)) {
                /*
                 * We're going to need the task's util for capacity_spare_wake
                 * in find_idlest_group. Sync it up to prev_cpu's
                 * last_update_time.
                 */
-               sync_entity_load_avg(&p->se);
-       }
-
-       if (!sd) {
-pick_cpu:
-               if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
-                       new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+               if (!(sd_flag & SD_BALANCE_FORK))
+                       sync_entity_load_avg(&p->se);
 
-                       if (want_affine)
-                               current->recent_used_cpu = cpu;
-               }
-       } else {
                new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
+       } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+               /* Fast path */
+
+               new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+
+               if (want_affine)
+                       current->recent_used_cpu = cpu;
        }
        rcu_read_unlock();
 
