On 12.08.2020 14:47, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/vioapic.c
> +++ b/xen/arch/x86/hvm/vioapic.c
> @@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
>      .write = vioapic_write
>  };
>  
> +static void eoi_callback(struct vcpu *v, unsigned int vector, void *data)
> +{
> +    struct domain *d = v->domain;
> +    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> +    union vioapic_redir_entry *ent;

While you move the code, could you restrict this variable's scope?

> +    unsigned int i;
> +
> +    ASSERT(has_vioapic(d));
> +
> +    spin_lock(&d->arch.hvm.irq_lock);
> +
> +    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> +    {
> +        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> +        unsigned int pin;
> +
> +        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> +        {
> +            ent = &vioapic->redirtbl[pin];
> +            if ( ent->fields.vector != vector )
> +                continue;
> +
> +            ent->fields.remote_irr = 0;
> +
> +            if ( is_iommu_enabled(d) )
> +            {
> +                spin_unlock(&d->arch.hvm.irq_lock);
> +                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
> +                spin_lock(&d->arch.hvm.irq_lock);
> +            }

Just as a remark (simply because it caught my attention) -
this intermediate dropping of the lock can't really be good. We
may want (or even need) to think about ways to avoid it.

Jan

Reply via email to