Peter,

Is there anything in this patch that you'd consider salvageable, or would it be better to just throw the whole thing out? In either case, I appreciate your honesty regarding this patch's (lack of) quality, and apologize for what is most likely a waste of your time.
On Sat, Sep 22, 2018 at 12:26:40PM +0200, Peter Zijlstra wrote:
> On Fri, Sep 21, 2018 at 03:22:03PM -0500, PierceGriffiths wrote:
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 625bc9897f62..443a1f235cfd 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
> >  	 * If there are more than one RR tasks, we need the tick to effect the
> >  	 * actual RR behaviour.
> >  	 */
> > -	if (rq->rt.rr_nr_running) {
> > -		if (rq->rt.rr_nr_running == 1)
> > -			return true;
> > -		else
> > -			return false;
> > -	}
> > +	if (rq->rt.rr_nr_running)
> > +		return rq->rt.rr_nr_running == 1;
> > 
> >  	/*
> >  	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
> 
> That one is OK I suppose.
> 
> > diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
> > index 5e54cbcae673..a8fd4bd68954 100644
> > --- a/kernel/sched/cpufreq.c
> > +++ b/kernel/sched/cpufreq.c
> > @@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
> >  			void (*func)(struct update_util_data *data, u64 time,
> >  				     unsigned int flags))
> >  {
> > -	if (WARN_ON(!data || !func))
> > -		return;
> > -
> > -	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
> > +	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
> >  		return;
> > 
> >  	data->func = func;
> 
> But I'm not a fan of this one. It mixes a different class of function
> and the WARN condition gets too complicated. Its easier to have separate
> warns.
> 
> > diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
> > index daaadf939ccb..152c133e8247 100644
> > --- a/kernel/sched/cpupri.c
> > +++ b/kernel/sched/cpupri.c
> > @@ -29,20 +29,16 @@
> >  #include "sched.h"
> > 
> >  /* Convert between a 140 based task->prio, and our 102 based cpupri */
> > -static int convert_prio(int prio)
> > +static int convert_prio(const int prio)
> >  {
> > -	int cpupri;
> > -
> >  	if (prio == CPUPRI_INVALID)
> > -		cpupri = CPUPRI_INVALID;
> > +		return CPUPRI_INVALID;
> >  	else if (prio == MAX_PRIO)
> > -		cpupri = CPUPRI_IDLE;
> > +		return CPUPRI_IDLE;
> >  	else if (prio >= MAX_RT_PRIO)
> > -		cpupri = CPUPRI_NORMAL;
> > +		return CPUPRI_NORMAL;
> >  	else
> > -		cpupri = MAX_RT_PRIO - prio + 1;
> > -
> > -	return cpupri;
> > +		return MAX_RT_PRIO - prio + 1;
> 
> The code looks even better if you leave out the last else.
> 
> >  }
> > 
> >  /**
> > @@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
> >  		smp_rmb();
> > 
> >  		/* Need to do the rmb for every iteration */
> > -		if (skip)
> > -			continue;
> > -
> > -		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
> > +		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
> > +			>= nr_cpu_ids)
> >  			continue;
> > 
> >  		if (lowest_mask) {
> 
> That just makes the code ugly for no reason.
> 
> > @@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
> >  		return 0;
> > 
> >  cleanup:
> > -	for (i--; i >= 0; i--)
> > +	while (--i >= 0)
> >  		free_cpumask_var(cp->pri_to_cpu[i].mask);
> >  	return -ENOMEM;
> >  }
> > diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> > index 2e2955a8cf8f..acf1b94669ad 100644
> > --- a/kernel/sched/rt.c
> > +++ b/kernel/sched/rt.c
> > @@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
> >  	destroy_rt_bandwidth(&tg->rt_bandwidth);
> > 
> >  	for_each_possible_cpu(i) {
> > -		if (tg->rt_rq)
> > -			kfree(tg->rt_rq[i]);
> > -		if (tg->rt_se)
> > -			kfree(tg->rt_se[i]);
> > +		/* Don't need to check if tg->rt_rq[i]
> > +		 * or tg->rt_se[i] are NULL, since kfree(NULL)
> > +		 * simply performs no operation
> > +		 */
> 
> That's an invalid comment style.
> 
> > +		kfree(tg->rt_rq[i]);
> > +		kfree(tg->rt_se[i]);
> >  	}
> > 
> >  	kfree(tg->rt_rq);
> > @@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
> > 
> >  	BUG_ON(&rq->rt != rt_rq);
> > 
> > -	if (rt_rq->rt_queued)
> > -		return;
> > -
> > -	if (rt_rq_throttled(rt_rq))
> > +	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
> >  		return;
> > 
> >  	if (rt_rq->rt_nr_running) {
> 
> The compiler can do this transformation and the old code was simpler.
> 
> > @@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> >   */
> >  static inline bool move_entity(unsigned int flags)
> >  {
> > -	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> > -		return false;
> > -
> > -	return true;
> > +	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> >  }
> 
> Again, I find the new code harder to read.
> 
> > 
> > @@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
> >  	/*
> >  	 * Disallowing the root group RT runtime is BAD, it would disallow the
> >  	 * kernel creating (and or operating) RT threads.
> > +	 *
> > +	 * No period doesn't make any sense.
> >  	 */
> > -	if (tg == &root_task_group && rt_runtime == 0)
> > -		return -EINVAL;
> > -
> > -	/* No period doesn't make any sense. */
> > -	if (rt_period == 0)
> > +	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
> >  		return -EINVAL;
> > 
> >  	mutex_lock(&rt_constraints_mutex);
> 
> Again, far harder to read.
> 
> In short, while all the transformations are 'correct' the end result is
> horrible. Please don't do this.
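
P.S. For my own understanding, below is a rough, untested sketch of what I take the "leave out the last else" suggestion for convert_prio() to mean. The #defines are only there so the snippet stands alone; they mirror the values defined in the scheduler headers and are not part of any proposed change.

/* Illustrative stand-ins for the definitions in the scheduler headers. */
#define MAX_RT_PRIO	100
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* the 140-based task->prio range */
#define CPUPRI_INVALID	(-1)
#define CPUPRI_IDLE	0
#define CPUPRI_NORMAL	1

/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(const int prio)
{
	if (prio == CPUPRI_INVALID)
		return CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		return CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		return CPUPRI_NORMAL;

	/* Every branch above returns, so no trailing else is needed here. */
	return MAX_RT_PRIO - prio + 1;
}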