Checking whether a CPU is marked as avoid can add a slight overhead and should
be done only when necessary.

Add a static key check which makes the check almost a NOP when the key is
false. The architecture needs to enable the key when it decides to. Refer to
the debug patch for an example.

Signed-off-by: Shrikanth Hegde <sshe...@linux.ibm.com>
---
This method avoids additional ifdefs, so it is kept this way instead of
gating on CONFIG_PARAVIRT.
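
A minimal sketch of the enablement path, assuming the scheduler exposes a
small setter for arch code (kernel/sched/sched.h is internal); the function
name and the is_shared_processor() call site below are illustrative, not
part of this series:

/* In kernel/sched/core.c: hypothetical setter exposed to arch code. */
void sched_paravirt_cpu_avoid_enable(void)
{
        static_branch_enable(&paravirt_cpu_avoid_enabled);
}

/* Arch side (e.g. powerpc paravirt init), enabling only when virtualized: */
if (is_shared_processor())
        sched_paravirt_cpu_avoid_enable();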

Added a helper function, cpu_avoid_check(), wrapping cpu_avoid(), since
including sched.h from cpumask.h fails.
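
For context, cpu_avoid() itself comes from an earlier patch in this series;
it is assumed here to be roughly a test against cpu_avoid_mask, along the
lines of:

/* Assumed shape of the existing cpumask.h helper (not part of this patch). */
static inline bool cpu_avoid(int cpu)
{
        return cpumask_test_cpu(cpu, cpu_avoid_mask);
}

cpu_avoid_check() in sched.h wraps this behind the static key, so scheduler
callers only pay for the mask test once the key is enabled.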

 kernel/sched/core.c  | 8 ++++----
 kernel/sched/fair.c  | 5 +++--
 kernel/sched/rt.c    | 8 ++++----
 kernel/sched/sched.h | 9 +++++++++
 4 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aea4232e3ec4..51426b17ef55 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -148,9 +148,9 @@ __read_mostly int sysctl_resched_latency_warn_once = 1;
  * Limited because this is done with IRQs disabled.
  */
 __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
-
 __read_mostly int scheduler_running;
 
+DEFINE_STATIC_KEY_FALSE(paravirt_cpu_avoid_enabled);
 #ifdef CONFIG_SCHED_CORE
 
 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
@@ -2438,7 +2438,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
                return false;
 
        /* CPU marked as avoid, shouldn't be chosen to run any task */
-       if (cpu_avoid(cpu))
+       if (cpu_avoid_check(cpu))
                return false;
 
        /* But are allowed during online. */
@@ -5578,7 +5578,7 @@ void sched_tick(void)
        sched_clock_tick();
 
        /* push the current task out if cpu is marked as avoid */
-       if (cpu_avoid(cpu))
+       if (cpu_avoid_check(cpu))
                push_current_task(rq);
 
        rq_lock(rq, &rf);
@@ -8048,7 +8048,7 @@ void push_current_task(struct rq *rq)
        unsigned long flags;
 
        /* idle task can't be pushed out */
-       if (rq->curr == rq->idle || !cpu_avoid(rq->cpu))
+       if (rq->curr == rq->idle || !cpu_avoid_check(rq->cpu))
                return;
 
        /* Do this only for SCHED_NORMAL and RT for now */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 406288aef535..21370f76d61b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8547,7 +8547,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
        rcu_read_unlock();
 
        /* Don't select a CPU marked as avoid for wakeup */
-       if (cpu_avoid(new_cpu))
+       if (cpu_avoid_check(new_cpu))
                return cpu;
        else
                return new_cpu;
@@ -11668,7 +11668,8 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
        cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
 
        /* Don't spread load into CPUs marked as avoid */
-       cpumask_andnot(cpus, cpus, cpu_avoid_mask);
+       if (static_branch_unlikely(&paravirt_cpu_avoid_enabled))
+               cpumask_andnot(cpus, cpus, cpu_avoid_mask);
 
        schedstat_inc(sd->lb_count[idle]);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index fd9df6f46135..0ab3fdf7a637 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1549,7 +1549,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
                if (!test && target != -1 && !rt_task_fits_capacity(p, target))
                        goto out_unlock;
 
-               if (cpu_avoid(target))
+               if (cpu_avoid_check(target))
                        goto out_unlock;
                /*
                 * Don't bother moving it if the destination CPU is
@@ -1873,7 +1873,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);
 
-               if ((cpu == -1) || (cpu == rq->cpu) || cpu_avoid(cpu))
+               if ((cpu == -1) || (cpu == rq->cpu) || cpu_avoid_check(cpu))
                        break;
 
                lowest_rq = cpu_rq(cpu);
@@ -1971,7 +1971,7 @@ static int push_rt_task(struct rq *rq, bool pull)
                        return 0;
 
                cpu = find_lowest_rq(rq->curr);
-               if (cpu == -1 || cpu == rq->cpu || cpu_avoid(cpu))
+               if (cpu == -1 || cpu == rq->cpu || cpu_avoid_check(cpu))
                        return 0;
 
                /*
@@ -2234,7 +2234,7 @@ static void pull_rt_task(struct rq *this_rq)
        if (likely(!rt_overload_count))
                return;
 
-       if (cpu_avoid(this_rq->cpu))
+       if (cpu_avoid_check(this_rq->cpu))
                return;
 
        /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b9614873762e..707fdfa46772 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1710,6 +1710,15 @@ struct rq_flags {
 
 extern struct balance_callback balance_push_callback;
 void push_current_task(struct rq *rq);
+DECLARE_STATIC_KEY_FALSE(paravirt_cpu_avoid_enabled);
+
+static inline bool cpu_avoid_check(int cpu)
+{
+       if (static_branch_unlikely(&paravirt_cpu_avoid_enabled))
+               return cpu_avoid(cpu);
+
+       return false;
+}
 
 #ifdef CONFIG_SCHED_CLASS_EXT
 extern const struct sched_class ext_sched_class;
-- 
2.43.0

