From: Dietmar Eggemann <[email protected]>

The per-sched-group sched_group_energy structure plus the related
idle_state and capacity_state arrays are allocated like the other sched
domain (sd) hierarchy data structures. This includes freeing
sched_group_energy structures that are not used.

One complication is that the number of elements in the idle_state and
capacity_state arrays is not fixed, and has to be retrieved in
__sdt_alloc() so that the sched_group_energy structure and the two
arrays can be allocated in one chunk. The array pointers (idle_states
and cap_states) are initialized there to point to the correct locations
inside the memory chunk.
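
For illustration only (not part of this patch), here is a minimal,
self-contained userspace sketch of the same one-chunk layout. The types
are simplified stand-ins carrying only the fields this patch touches,
and calloc() stands in for kzalloc_node(). The arrays are placed just
behind the structure with 'sge + 1', which matches the patch's
'&sge->cap_states + sizeof(sge->cap_states)' form as long as cap_states
is the last member:

  #include <stdio.h>
  #include <stdlib.h>

  /* Simplified stand-ins for the kernel types used by this patch. */
  struct idle_state     { unsigned long power; };
  struct capacity_state { unsigned long cap, power; };

  struct sched_group_energy {
          unsigned int nr_idle_states;
          struct idle_state *idle_states;
          unsigned int nr_cap_states;
          struct capacity_state *cap_states;      /* last member */
  };

  /*
   * Allocate the structure and both arrays in one chunk and point the
   * array members into the chunk: idle states first, capacity states
   * right behind them. The element counts are filled in later, the way
   * init_sched_energy() does it in the patch.
   */
  static struct sched_group_energy *alloc_sge(unsigned int nr_idle_states,
                                              unsigned int nr_cap_states)
  {
          struct sched_group_energy *sge;

          sge = calloc(1, sizeof(*sge) +
                       nr_idle_states * sizeof(struct idle_state) +
                       nr_cap_states * sizeof(struct capacity_state));
          if (!sge)
                  return NULL;

          sge->idle_states = (struct idle_state *)(sge + 1);
          sge->cap_states = (struct capacity_state *)
                            (sge->idle_states + nr_idle_states);

          return sge;
  }

  int main(void)
  {
          struct sched_group_energy *sge = alloc_sge(2, 3);

          if (!sge)
                  return 1;

          printf("chunk=%p idle_states=%p cap_states=%p\n",
                 (void *)sge, (void *)sge->idle_states,
                 (void *)sge->cap_states);

          free(sge);
          return 0;
  }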

The new function init_sched_energy() initializes the sched_group_energy
structure and the two arrays, provided that the sd topology level
contains energy information.
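
The energy data itself is provided per topology level through the
tl->energy callback (of type sched_domain_energy_f), which is not part
of this patch. As a hypothetical illustration only, reusing the
simplified stand-in types from the sketch above and assuming the
callback returns a struct sched_group_energy pointer (as the fn(cpu)
accesses in init_sched_energy() suggest), an arch-side provider for one
cluster could look roughly like this (all numbers made up):

  /* Made-up example energy data for one cluster. */
  static struct idle_state cluster_idle_states[] = {
          { .power = 6 },         /* e.g. WFI */
          { .power = 0 },         /* e.g. cluster power-down */
  };

  static struct capacity_state cluster_cap_states[] = {
          { .cap =  446, .power = 132 },
          { .cap =  871, .power = 369 },
          { .cap = 1024, .power = 502 },
  };

  static struct sched_group_energy cluster_energy = {
          .nr_idle_states = 2,
          .idle_states    = cluster_idle_states,
          .nr_cap_states  = 3,
          .cap_states     = cluster_cap_states,
  };

  /* Shape of a sched_domain_energy_f callback hooked up via tl->energy. */
  static struct sched_group_energy *cpu_cluster_energy(int cpu)
  {
          return &cluster_energy;         /* same table for every cpu here */
  }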

cc: Ingo Molnar <[email protected]>
cc: Peter Zijlstra <[email protected]>

Signed-off-by: Dietmar Eggemann <[email protected]>
---
 kernel/sched/core.c  | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h | 33 ++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a00a4c3..031ea48 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5707,6 +5707,7 @@ static void free_sched_domain(struct rcu_head *rcu)
                free_sched_groups(sd->groups, 1);
        } else if (atomic_dec_and_test(&sd->groups->ref)) {
                kfree(sd->groups->sgc);
+               kfree(sd->groups->sge);
                kfree(sd->groups);
        }
        kfree(sd);
@@ -5965,6 +5966,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
                *sg = *per_cpu_ptr(sdd->sg, cpu);
                (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
                atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
+               (*sg)->sge = *per_cpu_ptr(sdd->sge, cpu);
+               atomic_set(&(*sg)->sge->ref, 1); /* for claim_allocations */
        }
 
        return cpu;
@@ -6054,6 +6057,28 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
        atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+                             struct sched_domain_topology_level *tl)
+{
+       struct sched_group *sg = sd->groups;
+       struct sched_group_energy *energy = sg->sge;
+       sched_domain_energy_f fn = tl->energy;
+       struct cpumask *mask = sched_group_cpus(sg);
+
+       if (!fn || !fn(cpu))
+               return;
+
+       if (cpumask_weight(mask) > 1)
+               check_sched_energy_data(cpu, fn, mask);
+
+       energy->nr_idle_states = fn(cpu)->nr_idle_states;
+       memcpy(energy->idle_states, fn(cpu)->idle_states,
+              energy->nr_idle_states*sizeof(struct idle_state));
+       energy->nr_cap_states = fn(cpu)->nr_cap_states;
+       memcpy(energy->cap_states, fn(cpu)->cap_states,
+              energy->nr_cap_states*sizeof(struct capacity_state));
+}
+
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6144,6 +6169,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 
        if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
                *per_cpu_ptr(sdd->sgc, cpu) = NULL;
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sge, cpu))->ref))
+               *per_cpu_ptr(sdd->sge, cpu) = NULL;
 }
 
 #ifdef CONFIG_NUMA
@@ -6609,10 +6637,24 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                if (!sdd->sgc)
                        return -ENOMEM;
 
+               sdd->sge = alloc_percpu(struct sched_group_energy *);
+               if (!sdd->sge)
+                       return -ENOMEM;
+
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
                        struct sched_group_capacity *sgc;
+                       struct sched_group_energy *sge;
+                       sched_domain_energy_f fn = tl->energy;
+                       unsigned int nr_idle_states = 0;
+                       unsigned int nr_cap_states = 0;
+
+                       if (fn && fn(j)) {
+                               nr_idle_states = fn(j)->nr_idle_states;
+                               nr_cap_states = fn(j)->nr_cap_states;
+                               BUG_ON(!nr_idle_states || !nr_cap_states);
+                       }
 
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
@@ -6636,6 +6678,26 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                                return -ENOMEM;
 
                        *per_cpu_ptr(sdd->sgc, j) = sgc;
+
+                       sge = kzalloc_node(sizeof(struct sched_group_energy) +
+                               nr_idle_states*sizeof(struct idle_state) +
+                               nr_cap_states*sizeof(struct capacity_state),
+                               GFP_KERNEL, cpu_to_node(j));
+
+                       if (!sge)
+                               return -ENOMEM;
+
+                       sge->idle_states = (struct idle_state *)
+                                          ((void *)&sge->cap_states +
+                                           sizeof(sge->cap_states));
+
+                       sge->cap_states = (struct capacity_state *)
+                                         ((void *)&sge->cap_states +
+                                          sizeof(sge->cap_states) +
+                                          nr_idle_states*
+                                          sizeof(struct idle_state));
+
+                       *per_cpu_ptr(sdd->sge, j) = sge;
                }
        }
 
@@ -6664,6 +6726,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
                                kfree(*per_cpu_ptr(sdd->sg, j));
                        if (sdd->sgc)
                                kfree(*per_cpu_ptr(sdd->sgc, j));
+                       if (sdd->sge)
+                               kfree(*per_cpu_ptr(sdd->sge, j));
                }
                free_percpu(sdd->sd);
                sdd->sd = NULL;
@@ -6671,6 +6735,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
                sdd->sg = NULL;
                free_percpu(sdd->sgc);
                sdd->sgc = NULL;
+               free_percpu(sdd->sge);
+               sdd->sge = NULL;
        }
 }
 
@@ -6756,10 +6822,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
        /* Calculate CPU capacity for physical packages and nodes */
        for (i = nr_cpumask_bits-1; i >= 0; i--) {
+               struct sched_domain_topology_level *tl = sched_domain_topology;
+
                if (!cpumask_test_cpu(i, cpu_map))
                        continue;
 
-               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+                       init_sched_energy(i, sd, tl);
                        claim_allocations(i, sd);
                        init_sched_groups_capacity(i, sd);
                }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0e9dcc6..86cf6b2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -854,6 +854,39 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+                                          const struct cpumask *cpumask)
+{
+       struct cpumask mask;
+       int i;
+
+       cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+       for_each_cpu(i, &mask) {
+               int y;
+
+               BUG_ON(fn(i)->nr_idle_states != fn(cpu)->nr_idle_states);
+
+               for (y = 0; y < (fn(i)->nr_idle_states); y++) {
+                       BUG_ON(fn(i)->idle_states[y].power !=
+                                       fn(cpu)->idle_states[y].power);
+               }
+
+               BUG_ON(fn(i)->nr_cap_states != fn(cpu)->nr_cap_states);
+
+               for (y = 0; y < (fn(i)->nr_cap_states); y++) {
+                       BUG_ON(fn(i)->cap_states[y].cap !=
+                                       fn(cpu)->cap_states[y].cap);
+                       BUG_ON(fn(i)->cap_states[y].power !=
+                                       fn(cpu)->cap_states[y].power);
+               }
+       }
+}
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
-- 
1.9.1
