On Fri, 20 Nov 2020 at 10:06, Mel Gorman <mgor...@techsingularity.net> wrote:
>
> This is simply a preparation patch to make the following patches easier
> to read. No functional change.
>
> Signed-off-by: Mel Gorman <mgor...@techsingularity.net>

Reviewed-by: Vincent Guittot <vincent.guit...@linaro.org>

> ---
>  kernel/sched/fair.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 6d78b68847f9..5fbed29e4001 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1550,7 +1550,7 @@ struct task_numa_env {
>  static unsigned long cpu_load(struct rq *rq);
>  static unsigned long cpu_runnable(struct rq *rq);
>  static unsigned long cpu_util(int cpu);
> -static inline long adjust_numa_imbalance(int imbalance, int nr_running);
> +static inline long adjust_numa_imbalance(int imbalance, int dst_running);
>
>  static inline enum
>  numa_type numa_classify(unsigned int imbalance_pct,
> @@ -8991,7 +8991,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
>  	}
>  }
>
> -static inline long adjust_numa_imbalance(int imbalance, int nr_running)
> +#define NUMA_IMBALANCE_MIN 2
> +
> +static inline long adjust_numa_imbalance(int imbalance, int dst_running)
>  {
>  	unsigned int imbalance_min;
>
> @@ -8999,8 +9001,8 @@ static inline long adjust_numa_imbalance(int imbalance, int nr_running)
>  	 * Allow a small imbalance based on a simple pair of communicating
>  	 * tasks that remain local when the source domain is almost idle.
>  	 */
> -	imbalance_min = 2;
> -	if (nr_running <= imbalance_min)
> +	imbalance_min = NUMA_IMBALANCE_MIN;
> +	if (dst_running <= imbalance_min)
>  		return 0;
>
>  	return imbalance;
> --
> 2.26.2
>
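
As an aside for anyone following the thread, the helper is self-contained
enough to exercise outside the kernel. A minimal userspace sketch, assuming
only the patched logic quoted above (the printf harness and the sample
values are illustrative, not part of the patch):

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

/*
 * Standalone copy of the patched helper: tolerate a small imbalance
 * (return 0) while the destination is running at most
 * NUMA_IMBALANCE_MIN tasks, so a pair of communicating tasks can
 * stay local to one node.
 */
static inline long adjust_numa_imbalance(int imbalance, int dst_running)
{
	unsigned int imbalance_min = NUMA_IMBALANCE_MIN;

	if (dst_running <= imbalance_min)
		return 0;

	return imbalance;
}

int main(void)
{
	/* Destination almost idle: the imbalance is ignored */
	printf("%ld\n", adjust_numa_imbalance(2, 1));	/* prints 0 */
	/* Destination busier: the imbalance is reported as-is */
	printf("%ld\n", adjust_numa_imbalance(2, 4));	/* prints 2 */
	return 0;
}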