From: Luca Abeni <luca.ab...@unitn.it>

The original GRUB algorithm tends to reclaim 100% of the CPU time, and
this allows a CPU hog to starve non-deadline tasks.
To address this issue, allow the scheduler to reclaim only a specified
fraction of the CPU time.
Signed-off-by: Luca Abeni <luca.ab...@unitn.it>
---
 kernel/sched/core.c     | 4 ++++
 kernel/sched/deadline.c | 7 ++++++-
 kernel/sched/sched.h    | 6 ++++++
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5030b3c..4010af7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8286,6 +8286,10 @@ static void sched_dl_do_global(void)
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);

 		rcu_read_unlock_sched();
+		if (dl_b->bw == -1)
+			cpu_rq(cpu)->dl.non_deadline_bw = 0;
+		else
+			cpu_rq(cpu)->dl.non_deadline_bw = (1 << 20) - new_bw;
 	}
 }

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 361887b..7585dfb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -151,6 +151,11 @@ void init_dl_rq(struct dl_rq *dl_rq)
 #else
 	init_dl_bw(&dl_rq->dl_bw);
 #endif
+	if (global_rt_runtime() == RUNTIME_INF)
+		dl_rq->non_deadline_bw = 0;
+	else
+		dl_rq->non_deadline_bw = (1 << 20) -
+			to_ratio(global_rt_period(), global_rt_runtime());
 }

 #ifdef CONFIG_SMP
@@ -773,7 +778,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
  */
 u64 grub_reclaim(u64 delta, struct rq *rq)
 {
-	return (delta * rq->dl.running_bw) >> 20;
+	return (delta * (rq->dl.non_deadline_bw + rq->dl.running_bw)) >> 20;
 }

 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e422803..ef4bdaa 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -542,6 +542,12 @@ struct dl_rq {
 	 * task blocks
 	 */
 	u64 running_bw;
+
+	/*
+	 * Fraction of the CPU utilization that cannot be reclaimed
+	 * by the GRUB algorithm.
+	 */
+	u64 non_deadline_bw;
 };

 #ifdef CONFIG_SMP
-- 
2.7.4
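
For reference, here is a minimal user-space sketch of the arithmetic behind
non_deadline_bw and the modified grub_reclaim(); it is not part of the patch.
It re-implements to_ratio() locally with the same 20-bit fixed-point scale the
deadline scheduler uses, and assumes the default sched_rt_period_us /
sched_rt_runtime_us values of 1000000 / 950000, so the non-reclaimable
fraction works out to roughly 5%:

/*
 * Stand-alone sketch of the reclaiming arithmetic above; NOT part of the
 * patch.  Bandwidths use the deadline scheduler's 20-bit fixed point
 * format: 1 << 20 == 100% of one CPU.
 */
#include <stdio.h>
#include <stdint.h>

#define BW_UNIT		(1ULL << 20)
#define RUNTIME_INF	((uint64_t)~0ULL)

/* Local stand-in for the kernel's to_ratio(): runtime/period in fixed point. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)
		return BW_UNIT;
	if (period == 0)
		return 0;
	return (runtime << 20) / period;
}

int main(void)
{
	/* Assumed defaults: sched_rt_period_us=1000000, sched_rt_runtime_us=950000. */
	uint64_t period  = 1000000ULL * 1000;	/* ns */
	uint64_t runtime =  950000ULL * 1000;	/* ns */

	/* Utilization that GRUB must leave alone: here ~5% of the CPU. */
	uint64_t non_deadline_bw = BW_UNIT - to_ratio(period, runtime);

	/* Example: deadline tasks are currently using 40% of the CPU. */
	uint64_t running_bw = BW_UNIT * 40 / 100;

	uint64_t delta = 1000000;	/* 1 ms of consumed runtime, in ns */

	/* Old rule: charge only running_bw, so a lone task can reclaim ~100%. */
	uint64_t old_charge = (delta * running_bw) >> 20;
	/* New rule: also charge the non-reclaimable share, keeping ~5% free. */
	uint64_t new_charge = (delta * (non_deadline_bw + running_bw)) >> 20;

	printf("old charge: %llu ns, new charge: %llu ns (delta = %llu ns)\n",
	       (unsigned long long)old_charge,
	       (unsigned long long)new_charge,
	       (unsigned long long)delta);
	return 0;
}

With these numbers, a deadline task that consumes 1 ms is charged about
400 us under the old rule but about 450 us under the new one; that extra
charge is what keeps roughly 5% of the CPU available to non-deadline tasks.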