In fact, csched_vcpu_remove() (i.e., the credit1
implementation of remove_vcpu()) manipulates runqueues,
so holding the runqueue lock is necessary.
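
As a sketch of the pattern this results in (mirroring the second
hunk below, so purely illustrative), the runqueue manipulation is
bracketed by the per-vCPU scheduler lock, which for credit1 is the
runqueue lock:

    spinlock_t *lock;

    /* Take the (runqueue) lock before touching the runqueue. */
    lock = vcpu_schedule_lock_irq(vc);

    if ( __vcpu_on_runq(svc) )
        __runq_remove(svc);

    vcpu_schedule_unlock_irq(lock, vc);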

Also, while there, note that *_lock_irq() is enough for the
private lock; there is no need for *_lock_irqsave().
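
For clarity, a minimal illustration of the difference (not part of
the patch): spin_lock_irqsave() saves and restores the caller's
interrupt state, so it is only needed if interrupts may already be
disabled on entry; spin_lock_irq() unconditionally disables and
re-enables them, which is fine here since this path runs with
interrupts enabled:

    unsigned long flags;

    /* Needed only if the caller may already have IRQs disabled. */
    spin_lock_irqsave(&prv->lock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&prv->lock, flags);

    /* Enough when IRQs are known to be enabled on entry. */
    spin_lock_irq(&prv->lock);
    /* ... critical section ... */
    spin_unlock_irq(&prv->lock);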

Signed-off-by: Dario Faggioli <dario.faggi...@citrix.com>
---
Cc: George Dunlap <george.dun...@eu.citrix.com>
Cc: Andrew Cooper <andrew.coop...@citrix.com>
Cc: Jan Beulich <jbeul...@suse.com>
---
Changes from v1 (within the other series):
 * split the patch (wrt the original patch, in the original
   series), and take care, in this one, only of remove_vcpu();
 * removed pointless parentheses.
---
 xen/common/sched_credit.c |   10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index b8f28fe..6f71e0d 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -926,7 +926,7 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
     struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_vcpu * const svc = CSCHED_VCPU(vc);
     struct csched_dom * const sdom = svc->sdom;
-    unsigned long flags;
+    spinlock_t *lock;
 
     SCHED_STAT_CRANK(vcpu_remove);
 
@@ -936,15 +936,19 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
         vcpu_unpause(svc->vcpu);
     }
 
+    lock = vcpu_schedule_lock_irq(vc);
+
     if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
 
-    spin_lock_irqsave(&(prv->lock), flags);
+    vcpu_schedule_unlock_irq(lock, vc);
+
+    spin_lock_irq(&prv->lock);
 
     if ( !list_empty(&svc->active_vcpu_elem) )
         __csched_vcpu_acct_stop_locked(prv, svc);
 
-    spin_unlock_irqrestore(&(prv->lock), flags);
+    spin_unlock_irq(&prv->lock);
 
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );

