Hi Andre,
On 09/06/17 18:41, Andre Przywara wrote:
@@ -285,6 +291,17 @@ void arch_move_irqs(struct vcpu *v)
struct vcpu *v_target;
int i;
+ /*
+ * We don't migrate LPIs at the moment.
+ * If we ever do, we must make sure that the struct pending_irq does
+ * not go away, as there is no lock preventing this here.
+ * To ensure this, we check if the loop below ever touches LPIs.
+ * In the moment vgic_num_irqs() just covers SPIs, as it's mostly used
+ * for allocating the pending_irq and irq_desc array, in which LPIs
+ * don't participate.
IMHO, this paragraph should also be added on top of the definition of
vgic_num_irqs() (see the sketch below this hunk).
+ */
+ ASSERT(!is_lpi(vgic_num_irqs(d) - 1));
+
for ( i = 32; i < vgic_num_irqs(d); i++ )
{
v_target = vgic_get_target_vcpu(v, i);
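
Something along these lines on top of the helper in
include/asm-arm/vgic.h would do. Rough sketch only -- I am quoting the
body from memory, so please double-check it against the actual
definition:

/*
 * Number of interrupt IDs covered by the per-domain pending_irq and
 * irq_desc arrays: SGIs, PPIs and SPIs only. LPIs are deliberately
 * not included, and callers such as arch_move_irqs() rely on that to
 * never touch the struct pending_irq of an LPI without a lock
 * preventing it from going away.
 */
static inline int vgic_num_irqs(struct domain *d)
{
    /* Sketch: the actual body may differ slightly. */
    return d->arch.vgic.nr_spis + 32;
}
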
@@ -299,6 +316,7 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
{
const unsigned long mask = r;
struct pending_irq *p;
+ struct irq_desc *desc;
unsigned int irq;
unsigned long flags;
int i = 0;
@@ -307,17 +325,19 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
irq = i + (32 * n);
v_target = vgic_get_target_vcpu(v, irq);
+
Spurious change.
spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
p = irq_to_pending(v_target, irq);
clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
gic_remove_from_lr_pending(v_target, p);
+ desc = p->desc;
spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
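
Not a request for change, just spelling out the idea for anyone reading
along: latching desc while the vgic lock is still held presumably lets
the rest of the function act on the hardware interrupt after that lock
has been dropped. A sketch of the usual pattern -- not a quote of the
rest of the hunk, which is trimmed here:

/* Disable the hardware IRQ, if any, outside the vgic lock. */
if ( desc != NULL )
{
    spin_lock_irqsave(&desc->lock, flags);
    /* Sketch: assumes the handler's disable hook is what gets used. */
    desc->handler->disable(desc);
    spin_unlock_irqrestore(&desc->lock, flags);
}
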
Cheers,
--
Julien Grall