Hi,

On 4/24/19 10:45 AM, Dietmar Eggemann wrote:
The CFS class is the only one maintaining and using the CPU-wide load
(rq->load(.weight)). The last remaining use of the CPU-wide load, in
CFS's set_next_entity(), can be replaced by the load of the CFS class
(rq->cfs.load(.weight)) instead; a sketch of why the two are
equivalent follows the diff.

Signed-off-by: Dietmar Eggemann <dietmar.eggem...@arm.com>
---
  kernel/sched/debug.c | 2 --
  kernel/sched/fair.c  | 7 ++-----
  kernel/sched/sched.h | 2 --
  3 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8039d62ae36e..1148f43dbd42 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do {                                                       \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
-       SEQ_printf(m, "  .%-30s: %lu\n", "load",
-                  rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a4d9e14bf138..73a6718f29cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2682,8 +2682,6 @@ static void
  account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  {
        update_load_add(&cfs_rq->load, se->load.weight);
-       if (!parent_entity(se))
-               update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
  #ifdef CONFIG_SMP
        if (entity_is_task(se)) {
                struct rq *rq = rq_of(cfs_rq);
@@ -2699,8 +2697,6 @@ static void
  account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  {
        update_load_sub(&cfs_rq->load, se->load.weight);
-       if (!parent_entity(se))
-               update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
  #ifdef CONFIG_SMP
        if (entity_is_task(se)) {
                account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4096,7 +4092,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * least twice that of our own weight (i.e. dont track it
         * when there are only lesser-weight tasks around):
         */
-       if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+       if (schedstat_enabled() &&
+           rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
                schedstat_set(se->statistics.slice_max,
                        max((u64)schedstat_val(se->statistics.slice_max),
                            se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efa686eeff26..e4059e81e99c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
        atomic_t nohz_flags;
  #endif /* CONFIG_NO_HZ_COMMON */

-       /* capture load from *all* tasks on this CPU: */
-       struct load_weight      load;
        unsigned long           nr_load_updates;
        u64                     nr_switches;
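
For reference, here is a minimal user-space sketch of why the
replacement in set_next_entity() is safe. It is a hypothetical model,
not kernel code: struct entity and has_parent stand in for
sched_entity and parent_entity(se), and group scheduling entities are
left out for brevity. The point is that rq->load was only ever updated
under the same !parent_entity(se) guard that selects entities on the
root cfs_rq, so rq->load.weight always mirrored rq->cfs.load.weight:

#include <assert.h>
#include <stdio.h>

struct load_weight { unsigned long weight; };

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
}

/* Hypothetical stand-in for a sched_entity; has_parent models
 * parent_entity(se) != NULL, i.e. the entity sits in a child
 * task group rather than on the root cfs_rq. */
struct entity { unsigned long weight; int has_parent; };

int main(void)
{
        struct load_weight rq_load  = { 0 }; /* models the removed rq->load */
        struct load_weight cfs_load = { 0 }; /* models rq->cfs.load */

        struct entity se[] = {
                { 1024, 0 }, /* task on the root cfs_rq */
                { 2048, 0 }, /* another root-level task */
                {  512, 1 }, /* task inside a child task group */
        };

        for (unsigned int i = 0; i < sizeof(se) / sizeof(se[0]); i++) {
                /* Root-level entities are the ones accounted on
                 * rq->cfs by account_entity_enqueue() ... */
                if (!se[i].has_parent)
                        update_load_add(&cfs_load, se[i].weight);

                /* ... and the rq->load update removed by this patch
                 * sat under the very same !parent_entity(se) guard. */
                if (!se[i].has_parent)
                        update_load_add(&rq_load, se[i].weight);
        }

        /* Identical guards give identical weights, so the schedstat
         * check in set_next_entity() can read rq->cfs.load.weight. */
        assert(rq_load.weight == cfs_load.weight);
        printf("rq load == root cfs load == %lu\n", rq_load.weight);

        return 0;
}

Since the two fields are kept equal by construction, dropping rq->load
loses no information.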

Is there anything else I should do for this patch?

Thanks,

-- Dietmar
