Hi Peter,

On Fri, 20 Jul 2018 at 14:31, Peter Zijlstra <pet...@infradead.org> wrote:
>
> On Thu, Jul 19, 2018 at 02:00:06PM +0200, Vincent Guittot wrote:
> > But the compiler is not able to optimize the sequence (at least with
> > aarch64 GCC 7.2.1)
> >	free *= (max - irq);
> >	free /= max;
> > when irq is fixed to 0
>
> So much for compilers.... I thought those things were supposed to be
> 'clever'.
>
> > +#if defined(SMP) \
> > +	&& (defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING))
>
> That's atrocious :-)
>
> Fixed it with the below.

Thanks for the fix.
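In case anyone wants to reproduce the missed optimization, here is a
hypothetical standalone reduction of the sequence (toy names;
cpu_util_irq_stub() and scale_free() are mine, not the kernel's):

/* Hypothetical reduction of the scaling sequence; not the kernel code. */
static inline unsigned long cpu_util_irq_stub(void)
{
	return 0;	/* what cpu_util_irq() collapses to without avg_irq */
}

unsigned long scale_free(unsigned long free, unsigned long max)
{
	unsigned long irq = cpu_util_irq_stub();

	/*
	 * One would expect the compiler to fold the pair below to plain
	 * 'free' once irq is known to be 0.  Note the fold is only legal
	 * when free * max provably cannot wrap, which is plausibly why
	 * aarch64 GCC 7.2.1 keeps the mul/div pair in place.
	 */
	free *= (max - irq);
	free /= max;

	return free;
}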
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -135,7 +135,7 @@ static void update_rq_clock_task(struct
>   * In theory, the compile should just see 0 here, and optimize out the call
>   * to sched_rt_avg_update. But I don't trust it...
>   */
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#ifdef HAVE_SCHED_AVG_IRQ
>  	s64 steal = 0, irq_delta = 0;
>  #endif
>  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
> @@ -177,7 +177,7 @@ static void update_rq_clock_task(struct
>
>  	rq->clock_task += delta;
>
> -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#ifdef HAVE_SCHED_AVG_IRQ
>  	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
>  		update_irq_load_avg(rq, irq_delta + steal);
>  #endif
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -856,6 +856,7 @@ struct rq {
>  	struct sched_avg	avg_rt;
>  	struct sched_avg	avg_dl;
>  #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
> +#define HAVE_SCHED_AVG_IRQ
>  	struct sched_avg	avg_irq;
>  #endif
>  	u64			idle_stamp;
> @@ -2212,8 +2213,7 @@ static inline unsigned long cpu_util_rt(
>  }
>  #endif
>
> -#if defined(SMP) \
> -	&& (defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING))
> +#ifdef HAVE_SCHED_AVG_IRQ
>  static inline unsigned long cpu_util_irq(struct rq *rq)
>  {
>  	return rq->avg_irq.util_avg;
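For anyone following along, the trick in the fix is worth spelling out:
HAVE_SCHED_AVG_IRQ is #define'd right next to the conditional avg_irq
member, so every other site tests a single macro instead of repeating
the CONFIG_* combination, and users cannot get out of sync with the
struct layout. A standalone sketch of the pattern (toy types and names,
not the kernel code):

/* Toy standalone sketch of the define-next-to-the-member pattern. */
#include <stdio.h>

#define CONFIG_IRQ_TIME_ACCOUNTING 1	/* stand-in for a Kconfig option */

struct rq {
	unsigned long util;
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
/* Defined beside the member, so the macro exists iff the member does. */
#define HAVE_SCHED_AVG_IRQ
	unsigned long avg_irq;
#endif
};

static unsigned long cpu_util_irq(struct rq *rq)
{
#ifdef HAVE_SCHED_AVG_IRQ
	return rq->avg_irq;
#else
	return 0;	/* member absent: a compile-time constant 0 */
#endif
}

int main(void)
{
	struct rq rq = { .util = 42 };

#ifdef HAVE_SCHED_AVG_IRQ
	rq.avg_irq = 7;
#endif
	printf("irq util: %lu\n", cpu_util_irq(&rq));
	return 0;
}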