by adding a two step loop. The function now finds a fallback cpu for a given
vcpu using the following precedence...
1) the vcpu's current pcpu
soft affinity step...
2) another pcpu from the vcpu's current runq in the vcpu's soft affinity
3) an online pcpu in the vcpu's domain's cpupool, and in the vcpu's soft
   affinity
hard affinity step...
4) another pcpu from the vcpu's current runq in the vcpu's hard affinity
5) an online pcpu in the vcpu's domain's cpupool, and in the vcpu's hard
   affinity

Signed-off-by: Justin T. Weaver <jtwea...@hawaii.edu>
---
Changes in v4:
 * renamed all uses of csched2_cpumask to scratch_mask
 * updated the comment before the function describing the added soft affinity
   aware functionality
 * updated the function to match the flow of the rewrite in the hard affinity
   patch based on the v3 hard affinity review
 * moved the VCPU2ONLINE section outside of the else block; removed the else
   block
Changes in v3:
 * added balance loop to try to find a soft affinity cpu
Changes in v2:
 * Not submitted in version 2; focus was on the hard affinity patch
---
 xen/common/sched_credit2.c |   31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 42a1097..66f0a20 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -284,25 +284,38 @@ struct csched2_dom {
  *
  * Function returns a valid pcpu for svc, in order of preference:
  * - svc's current pcpu;
- * - another pcpu from svc's current runq;
+ * - another pcpu from svc's current runq in svc's soft affinity;
+ * - an online pcpu in svc's domain's cpupool, and in svc's soft affinity;
+ * - another pcpu from svc's current runq in svc's hard affinity;
  * - an online pcpu in svc's domain's cpupool, and in svc's hard affinity;
  */
 static int get_fallback_cpu(struct csched2_vcpu *svc)
 {
-    int cpu;
+    int cpu, balance_step;
 
     if ( likely(cpumask_test_cpu(svc->vcpu->processor,
                                  svc->vcpu->cpu_hard_affinity)) )
         return svc->vcpu->processor;
 
-    cpumask_and(scratch_mask, svc->vcpu->cpu_hard_affinity,
-                &svc->rqd->active);
-    cpu = cpumask_first(scratch_mask);
-    if ( likely(cpu < nr_cpu_ids) )
-        return cpu;
+    for_each_sched_balance_step( balance_step )
+    {
+        if ( balance_step == SCHED_BALANCE_SOFT_AFFINITY
+            && !__vcpu_has_soft_affinity(svc->vcpu,
+                svc->vcpu->cpu_hard_affinity) )
+            continue;
+
+        sched_balance_cpumask(svc->vcpu, balance_step, scratch_mask);
+        cpumask_and(scratch_mask, scratch_mask, &svc->rqd->active);
+        cpu = cpumask_first(scratch_mask);
+        if ( likely(cpu < nr_cpu_ids) )
+            return cpu;
+
+        sched_balance_cpumask(svc->vcpu, balance_step, scratch_mask);
+        cpumask_and(scratch_mask, scratch_mask, VCPU2ONLINE(svc->vcpu));
+        if ( !cpumask_empty(scratch_mask) )
+            break;
+    }
 
-    cpumask_and(scratch_mask, svc->vcpu->cpu_hard_affinity,
-                VCPU2ONLINE(svc->vcpu));
     ASSERT( !cpumask_empty(scratch_mask) );
     return cpumask_first(scratch_mask);
 }
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Reply via email to