The current sched_slice() has issues; there are two things that could
be improved:

 - the 'nr_running' used for __sched_period() is daft when cgroups are
   considered: it only counts entities queued on the local cfs_rq. Using
   the RQ wide h_nr_running seems like a much more consistent number.

 - (especially) cgroups can slice the period real fine, which makes for
   easy over-scheduling; ensure min_granularity is the lower bound its
   name implies (see the sketch below).
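
To make both points concrete, a minimal userspace sketch; the constants
are the unscaled defaults (6ms latency, 0.75ms min granularity,
sched_nr_latency = 8), the task counts and the 1/1024 weight share are
made up, and only period() mirrors the kernel's __sched_period():

  #include <stdio.h>
  #include <inttypes.h>

  #define NSEC_PER_MSEC		1000000ULL
  #define SCHED_LATENCY		(6 * NSEC_PER_MSEC)	/* sysctl_sched_latency */
  #define SCHED_MIN_GRAN	(NSEC_PER_MSEC * 3 / 4)	/* sysctl_sched_min_granularity */
  #define SCHED_NR_LATENCY	8			/* sched_nr_latency */

  /* Mirrors __sched_period(): stretch the period once there are more
   * runnable tasks than fit in the latency target. */
  static uint64_t period(unsigned long nr_running)
  {
  	if (nr_running > SCHED_NR_LATENCY)
  		return nr_running * SCHED_MIN_GRAN;
  	return SCHED_LATENCY;
  }

  int main(void)
  {
  	/* ALT_PERIOD: a task alone in its cgroup has a local
  	 * cfs_rq->nr_running of 1, even when the runqueue as a whole
  	 * has (say) 16 runnable tasks in rq->cfs.h_nr_running. */
  	printf("period(local nr_running=1)      = %" PRIu64 " ns\n", period(1));
  	printf("period(rq-wide h_nr_running=16) = %" PRIu64 " ns\n", period(16));

  	/* BASE_SLICE: a low-weight entity deep in the hierarchy can be
  	 * handed a sliver of the period; clamping to min_granularity
  	 * keeps preemption from getting arbitrarily fine. */
  	uint64_t slice = period(16) / 1024;
  	if (slice < SCHED_MIN_GRAN)
  		slice = SCHED_MIN_GRAN;
  	printf("clamped slice                   = %" PRIu64 " ns\n", slice);

  	return 0;
  }

With these defaults it prints a 6ms vs 12ms period, and a 0.75ms slice
floor instead of the ~11.7us sliver.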

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/sched/fair.c     |   12 +++++++++++-
 kernel/sched/features.h |    3 +++
 2 files changed, 14 insertions(+), 1 deletion(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -680,7 +680,13 @@ static u64 __sched_period(unsigned long
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
+       unsigned int nr_running = cfs_rq->nr_running;
+       u64 slice;
+
+       if (sched_feat(ALT_PERIOD))
+               nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
+
+       slice = __sched_period(nr_running + !se->on_rq);
 
        for_each_sched_entity(se) {
                struct load_weight *load;
@@ -697,6 +703,10 @@ static u64 sched_slice(struct cfs_rq *cf
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
+
+       if (sched_feat(BASE_SLICE))
+               slice = max(slice, (u64)sysctl_sched_min_granularity);
+
        return slice;
 }
 
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
  */
 SCHED_FEAT(UTIL_EST, true)
 SCHED_FEAT(UTIL_EST_FASTUP, true)
+
+SCHED_FEAT(ALT_PERIOD, true)
+SCHED_FEAT(BASE_SLICE, true)
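
Both default to on; for testing they can be toggled at runtime through
the usual sched_features debugfs file (CONFIG_SCHED_DEBUG=y), e.g.
echo NO_ALT_PERIOD > /sys/kernel/debug/sched_features.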

