Commit-ID:  dfa444dc2ff62edbaf1ff95ed22dd2ce8a5715da
Gitweb:     https://git.kernel.org/tip/dfa444dc2ff62edbaf1ff95ed22dd2ce8a5715da
Author:     Vincent Guittot <vincent.guit...@linaro.org>
AuthorDate: Thu, 28 Jun 2018 17:45:11 +0200
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Sun, 15 Jul 2018 23:51:21 +0200

sched/cpufreq: Remove sugov_aggregate_util()

There is no reason for sugov_get_util() and sugov_aggregate_util() to be
separate functions, so fold the aggregation into sugov_get_util() and
remove sugov_aggregate_util().

Signed-off-by: Vincent Guittot <vincent.guit...@linaro.org>
[ Rebased after adding irq tracking and fixed some compilation errors. ]
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Acked-by: Viresh Kumar <viresh.ku...@linaro.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: morten.rasmus...@arm.com
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: clau...@evidence.eu.com
Cc: daniel.lezc...@linaro.org
Cc: dietmar.eggem...@arm.com
Cc: j...@joelfernandes.org
Cc: juri.le...@redhat.com
Cc: luca.ab...@santannapisa.it
Cc: patrick.bell...@arm.com
Cc: quentin.per...@arm.com
Cc: r...@rjwysocki.net
Cc: valentin.schnei...@arm.com
Link: http://lkml.kernel.org/r/1530200714-4504-9-git-send-email-vincent.guit...@linaro.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/sched/cpufreq_schedutil.c | 44 ++++++++++++++--------------------------
 kernel/sched/sched.h             |  2 +-
 2 files changed, 16 insertions(+), 30 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 7016bde9d194..c9622b3f183d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,12 +53,7 @@ struct sugov_cpu {
        unsigned int            iowait_boost_max;
        u64                     last_update;
 
-       /* The fields below are only needed when sharing a policy: */
-       unsigned long           util_cfs;
-       unsigned long           util_dl;
        unsigned long           bw_dl;
-       unsigned long           util_rt;
-       unsigned long           util_irq;
        unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
@@ -182,38 +177,31 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
        return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
+       unsigned long util, irq, max;
 
-       sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
-       sg_cpu->util_cfs = cpu_util_cfs(rq);
-       sg_cpu->util_dl  = cpu_util_dl(rq);
-       sg_cpu->bw_dl    = cpu_bw_dl(rq);
-       sg_cpu->util_rt  = cpu_util_rt(rq);
-       sg_cpu->util_irq = cpu_util_irq(rq);
-}
-
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
-       struct rq *rq = cpu_rq(sg_cpu->cpu);
-       unsigned long util, max = sg_cpu->max;
+       sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+       sg_cpu->bw_dl = cpu_bw_dl(rq);
 
        if (rt_rq_is_runnable(&rq->rt))
-               return sg_cpu->max;
+               return max;
+
+       irq = cpu_util_irq(rq);
 
-       if (unlikely(sg_cpu->util_irq >= max))
+       if (unlikely(irq >= max))
                return max;
 
        /* Sum rq utilization */
-       util = sg_cpu->util_cfs;
-       util += sg_cpu->util_rt;
+       util = cpu_util_cfs(rq);
+       util += cpu_util_rt(rq);
 
        /*
         * Interrupt time is not seen by RQS utilization so we can compare
         * them with the CPU capacity
         */
-       if ((util + sg_cpu->util_dl) >= max)
+       if ((util + cpu_util_dl(rq)) >= max)
                return max;
 
        /*
@@ -231,11 +219,11 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
         */
 
        /* Weight RQS utilization to normal context window */
-       util *= (max - sg_cpu->util_irq);
+       util *= (max - irq);
        util /= max;
 
        /* Add interrupt utilization */
-       util += sg_cpu->util_irq;
+       util += irq;
 
        /* Add DL bandwidth requirement */
        util += sg_cpu->bw_dl;
@@ -418,9 +406,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
        busy = sugov_cpu_is_busy(sg_cpu);
 
-       sugov_get_util(sg_cpu);
+       util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
-       util = sugov_aggregate_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time, &util, &max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
@@ -459,9 +446,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
 
-               sugov_get_util(j_sg_cpu);
+               j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
-               j_util = sugov_aggregate_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
 
                if (j_util * max > j_max * util) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b2833e2b4b6a..061d51fb5b44 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2226,7 +2226,7 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
 
 static inline unsigned long cpu_util_rt(struct rq *rq)
 {
-       return rq->avg_rt.util_avg;
+       return READ_ONCE(rq->avg_rt.util_avg);
 }
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
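
A side note on the sched.h hunk, not spelled out in the changelog: wrapping the
read of rq->avg_rt.util_avg in READ_ONCE() marks it as a lockless access and
keeps the compiler from tearing or re-loading the value, matching how
cpu_util_cfs() already reads its utilization averages.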

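For reference, the aggregation now folded into sugov_get_util() boils down to
the sketch below. This is a standalone illustration, not kernel code: the
helper name and its plain parameters are hypothetical stand-ins for the values
the real function reads via cpu_util_cfs(), cpu_util_rt(), cpu_util_dl(),
cpu_util_irq(), cpu_bw_dl(), rt_rq_is_runnable() and
arch_scale_cpu_capacity(), and the final clamp to the CPU capacity is an
assumption, not something visible in the hunk above.

/*
 * Standalone sketch of the utilization aggregation performed by the
 * merged sugov_get_util().  All parameters are hypothetical stand-ins
 * for the per-rq values the real function reads.
 */
static unsigned long aggregate_util_sketch(unsigned long cfs, unsigned long rt,
					   unsigned long dl, unsigned long irq,
					   unsigned long bw_dl, unsigned long max,
					   bool rt_runnable)
{
	unsigned long util;

	/* A runnable RT task requests the maximum frequency. */
	if (rt_runnable)
		return max;

	/* Interrupt pressure alone already saturates the CPU. */
	if (irq >= max)
		return max;

	/* Sum CFS and RT rq utilization ... */
	util = cfs + rt;

	/* ... and bail out if rq plus DL utilization fills the CPU. */
	if (util + dl >= max)
		return max;

	/* Scale rq utilization to the time not stolen by interrupts. */
	util = util * (max - irq) / max;

	/* Add back interrupt utilization and the DL bandwidth requirement. */
	util += irq;
	util += bw_dl;

	/* Assumed clamp: never request more than the CPU capacity. */
	return util < max ? util : max;
}

After the merge, only max and bw_dl remain cached in struct sugov_cpu; the
intermediate util_cfs, util_dl, util_rt and util_irq values become locals of
sugov_get_util(), which is exactly what the first hunk removes.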