Signed-off-by: Lauro Ramos Venancio <lvena...@redhat.com>
---
 kernel/sched/topology.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 55bbaf7..e77c93a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -495,14 +495,6 @@ enum s_alloc {
 /*
  * Build an iteration mask that can exclude certain CPUs from the upwards
  * domain traversal.
- *
- * Asymmetric node setups can result in situations where the domain tree is of
- * unequal depth, make sure to skip domains that already cover the entire
- * range.
- *
- * In that case build_sched_domains() will have terminated the iteration early
- * and our sibling sd spans will be empty. Domains should always include the
- * CPU they're built on, so check that.
  */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
@@ -612,7 +604,16 @@ static void init_overlap_sched_group(struct sched_domain *sd,
 
                sibling = *per_cpu_ptr(sdd->sd, i);
 
-               /* See the comment near build_group_mask(). */
+               /*
+                * Asymmetric node setups can result in situations where the
+                * domain tree is of unequal depth; make sure to skip domains
+                * that already cover the entire range.
+                *
+                * In that case build_sched_domains() will have terminated the
+                * iteration early and our sibling sd spans will be empty.
+                * Domains should always include the CPU they're built on, so
+                * check that.
+                */
                if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;
 
-- 
1.8.3.1
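
For readers following along, here is a minimal, self-contained userland
sketch of the guard this comment documents: skip any sibling whose span
does not contain the CPU it was built for, since an early-terminated
domain build leaves that span empty. The toy_domain struct and the plain
bitmask span are illustrative stand-ins, not the kernel's sched_domain
or cpumask types.

#include <stdio.h>

struct toy_domain {
	unsigned long span;	/* bit i set => CPU i is covered */
};

static int span_test_cpu(unsigned long span, int cpu)
{
	return (span >> cpu) & 1UL;
}

int main(void)
{
	/*
	 * Asymmetric setup: the sibling for CPU 2 was never filled in
	 * (empty span), mimicking an early-terminated domain build.
	 */
	struct toy_domain siblings[4] = {
		{ .span = 0x3 },	/* CPUs 0-1 */
		{ .span = 0x3 },	/* CPUs 0-1 */
		{ .span = 0x0 },	/* empty: build stopped early */
		{ .span = 0x8 },	/* CPU 3 */
	};
	int i;

	for (i = 0; i < 4; i++) {
		/*
		 * A domain should always cover the CPU it is built on;
		 * an empty sibling span fails this test and is skipped.
		 */
		if (!span_test_cpu(siblings[i].span, i)) {
			printf("cpu %d: skipping empty sibling\n", i);
			continue;
		}
		printf("cpu %d: sibling span 0x%lx\n", i, siblings[i].span);
	}
	return 0;
}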
