Modify the generate_sched_domains() function and the hotplug code to
use the newly introduced isolated_cpus mask for sched domain
generation.

Signed-off-by: Waiman Long <long...@redhat.com>
---
 kernel/cgroup/cpuset.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 368e1b7..0e75f83 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -672,13 +672,14 @@ static int generate_sched_domains(cpumask_var_t **domains,
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
        struct cgroup_subsys_state *pos_css;
+       bool root_load_balance = is_sched_load_balance(&top_cpuset);
 
        doms = NULL;
        dattr = NULL;
        csa = NULL;
 
        /* Special case for the 99% of systems with one, full, sched domain */
-       if (is_sched_load_balance(&top_cpuset)) {
+       if (root_load_balance && !top_cpuset.isolation_count) {
                ndoms = 1;
                doms = alloc_sched_domains(ndoms);
                if (!doms)
@@ -701,6 +702,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
        csn = 0;
 
        rcu_read_lock();
+       if (root_load_balance)
+               csa[csn++] = &top_cpuset;
        cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
                if (cp == &top_cpuset)
                        continue;
@@ -711,6 +714,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
                 * parent's cpus, so just skip them, and then we call
                 * update_domain_attr_tree() to calc relax_domain_level of
                 * the corresponding sched domain.
+                *
+                * If root is load-balancing, we can skip @cp if its
+                * cpus_allowed is a subset of the root's effective_cpus.
                 */
                if (!cpumask_empty(cp->cpus_allowed) &&
                    !(is_sched_load_balance(cp) &&
@@ -718,11 +724,16 @@ static int generate_sched_domains(cpumask_var_t **domains,
                                         housekeeping_cpumask(HK_FLAG_DOMAIN))))
                        continue;
 
+               if (root_load_balance &&
+                   cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
+                       continue;
+
                if (is_sched_load_balance(cp))
                        csa[csn++] = cp;
 
-               /* skip @cp's subtree */
-               pos_css = css_rightmost_descendant(pos_css);
+               /* skip @cp's subtree if not a scheduling domain */
+               if (!is_sched_domain(cp))
+                       pos_css = css_rightmost_descendant(pos_css);
        }
        rcu_read_unlock();
 
@@ -849,7 +860,12 @@ static void rebuild_sched_domains_locked(void)
         * passing doms with offlined cpu to partition_sched_domains().
         * Anyways, hotplug work item will rebuild sched domains.
         */
-       if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+       if (!top_cpuset.isolation_count &&
+           !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+               goto out;
+
+       if (top_cpuset.isolation_count &&
+          !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
                goto out;
 
        /* Generate domain masks and attrs */
@@ -2624,6 +2640,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        cpumask_copy(&new_cpus, cpu_active_mask);
        new_mems = node_states[N_MEMORY];
 
+       /*
+        * If isolated_cpus is populated, the check below can produce a
+        * false positive on cpus_updated even when the cpu list hasn't
+        * changed. It is extra work, but it is better to be safe.
+        */
        cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
        mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
@@ -2632,6 +2653,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
                spin_lock_irq(&callback_lock);
                if (!on_dfl)
                        cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+
+               if (top_cpuset.isolation_count)
+                       cpumask_andnot(&new_cpus, &new_cpus,
+                                       top_cpuset.isolated_cpus);
                cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
                spin_unlock_irq(&callback_lock);
                /* we don't mess with cpumasks of tasks in top_cpuset */
-- 
1.8.3.1
