Introduce a per-cpu variable to track the boundary up to which the idle CPU search was done in select_idle_cpu(). The next search on that CPU can then resume from this point, which is necessary for rotating the search window over the entire LLC domain.
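For context, a minimal sketch (not part of this patch) of how a follow-up change to select_idle_cpu() might consume next_cpu to rotate the scan. Only the next_cpu variable comes from this patch; the function body, the fixed scan budget, and the cpus_allowed check are illustrative assumptions.

	/*
	 * Illustrative only: resume the LLC scan from where the previous
	 * scan on this CPU stopped, instead of always starting at target.
	 */
	static int select_idle_cpu_sketch(struct task_struct *p,
					  struct sched_domain *sd, int target)
	{
		int cpu, nr = 4;		/* assumed fixed scan budget */
		int start = per_cpu(next_cpu, target);

		if (start == -1)		/* no earlier scan recorded */
			start = target;

		for_each_cpu_wrap(cpu, sched_domain_span(sd), start) {
			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
				continue;
			if (idle_cpu(cpu)) {
				per_cpu(next_cpu, target) = cpu;
				return cpu;
			}
			if (!--nr) {
				/* budget spent: remember boundary for next time */
				per_cpu(next_cpu, target) = cpu;
				return -1;
			}
		}

		per_cpu(next_cpu, target) = target;	/* wrapped whole LLC */
		return -1;
	}

Because next_cpu is per-CPU, concurrent wakeups handled on different CPUs rotate independent windows without contending on shared state.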
Signed-off-by: subhra mazumdar <subhra.mazum...@oracle.com>
---
 kernel/sched/core.c  | 2 ++
 kernel/sched/sched.h | 1 +
 2 files changed, 3 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427..80657fc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -24,6 +24,7 @@
 #include <trace/events/sched.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU_SHARED_ALIGNED(int, next_cpu);
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
 /*
@@ -5966,6 +5967,7 @@ void __init sched_init(void)
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
+		per_cpu(next_cpu, i) = -1;
 		rq = cpu_rq(i);
 		raw_spin_lock_init(&rq->lock);
 		rq->nr_running = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1a..4cecfa2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -994,6 +994,7 @@ static inline void update_idle_core(struct rq *rq) { }
 #endif
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU_SHARED_ALIGNED(int, next_cpu);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		this_cpu_ptr(&runqueues)
-- 
2.9.3