In order to evaluate the tick dependency, we need to account for SCHED_RR and SCHED_FIFO tasks separately, as those policies don't have the same preemption requirements.
We still keep rt_nr_running as a cache to avoid additions between
rr_nr_running and fifo_nr_running all over the place.

Cc: Christoph Lameter <c...@linux.com>
Cc: Chris Metcalf <cmetc...@ezchip.com>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Luiz Capitulino <lcapitul...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Viresh Kumar <viresh.ku...@linaro.org>
Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
---
 kernel/sched/rt.c    | 34 ++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h |  2 ++
 2 files changed, 36 insertions(+)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d2ea593..0e80458 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1152,12 +1152,43 @@ unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 }
 
 static inline
+unsigned int rt_se_fifo_nr_running(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *group_rq = group_rt_rq(rt_se);
+	struct task_struct *tsk;
+
+	if (group_rq)
+		return group_rq->fifo_nr_running;
+
+	tsk = rt_task_of(rt_se);
+
+	return (tsk->policy == SCHED_FIFO) ? 1 : 0;
+}
+
+static inline
+unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *group_rq = group_rt_rq(rt_se);
+	struct task_struct *tsk;
+
+	if (group_rq)
+		return group_rq->rr_nr_running;
+
+	tsk = rt_task_of(rt_se);
+
+	return (tsk->policy == SCHED_RR) ? 1 : 0;
+}
+
+static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	int prio = rt_se_prio(rt_se);
 
 	WARN_ON(!rt_prio(prio));
 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
+	rt_rq->fifo_nr_running += rt_se_fifo_nr_running(rt_se);
+	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
+	WARN_ON_ONCE(rt_rq->rt_nr_running != rt_rq->fifo_nr_running + rt_rq->rr_nr_running);
 
 	inc_rt_prio(rt_rq, prio);
 	inc_rt_migration(rt_se, rt_rq);
@@ -1170,6 +1201,9 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
+	rt_rq->fifo_nr_running -= rt_se_fifo_nr_running(rt_se);
+	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
+	WARN_ON_ONCE(rt_rq->rt_nr_running != rt_rq->fifo_nr_running + rt_rq->rr_nr_running);
 
 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
 	dec_rt_migration(rt_se, rt_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119..cfafbdd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -433,6 +433,8 @@ static inline int rt_bandwidth_enabled(void)
 struct rt_rq {
 	struct rt_prio_array active;
 	unsigned int rt_nr_running;
+	unsigned int fifo_nr_running;
+	unsigned int rr_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	struct {
 		int curr; /* highest queued rt task prio */
-- 
2.5.3
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/