Hi,
On 12/04/17 01:44, Andre Przywara wrote:
The INVALL command instructs an ITS to invalidate the configuration
data for all LPIs associated with a given redistributor (read: VCPU).
This is nasty to emulate exactly with our architecture, so we just
iterate over all mapped LPIs and filter for those from that particular
VCPU.
Signed-off-by: Andre Przywara <andre.przyw...@arm.com>
---
xen/arch/arm/vgic-v3-its.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 66 insertions(+)
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index f2789c5..9b5032b 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -480,6 +480,69 @@ out_unlock:
     return ret;
 }
+/*
+ * INVALL updates the per-LPI configuration status for every LPI mapped to
+ * a particular redistributor.
+ * We iterate over all mapped LPIs in our radix tree and update those.
+ */
+static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t collid = its_cmd_get_collection(cmdptr);
+    struct vcpu *vcpu;
+    struct pending_irq *pirqs[16];
+    uint64_t vlpi = 0;          /* 64-bit to catch overflows */
+    unsigned int nr_lpis, i;
+    unsigned long flags;
+    int ret = 0;
+
+    /*
+     * As this implementation walks over all mapped LPIs, it might take
+     * too long for a real guest, so we might want to revisit this
+     * implementation for DomUs.
+     * However, this command is very rare and we don't expect many
+     * LPIs to actually be mapped, so it is fine for Dom0 to use.
+     */
+    ASSERT(is_hardware_domain(its->d));
+
+    spin_lock(&its->its_lock);
+    vcpu = get_vcpu_from_collection(its, collid);
+    spin_unlock(&its->its_lock);
+
+    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
See my remark earlier for the locking.
+    read_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
+
+    do
+    {
+        nr_lpis = radix_tree_gang_lookup(&its->d->arch.vgic.pend_lpi_tree,
+                                         (void **)pirqs, vlpi,
+                                         ARRAY_SIZE(pirqs));
+
+        for ( i = 0; i < nr_lpis; i++ )
+        {
+            /* We only care about LPIs on our VCPU. */
+            if ( pirqs[i]->lpi_vcpu_id != vcpu->vcpu_id )
+                continue;
+
+            vlpi = pirqs[i]->irq;
+            /* If that fails for a single LPI, carry on to handle the rest. */
+            ret = update_lpi_property(its->d, vlpi, pirqs[i]);
It is a bit weird that update_lpi_property takes the vlpi then the pending_irq,
and ...
+            if ( !ret )
+                update_lpi_vgic_status(vcpu, pirqs[i], vlpi);
... update_lpi_vgic_status takes the pending_irq then the vlpi.
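Using the same argument order for both would read more consistently, e.g.
something along these lines (hypothetical prototypes, inferred only from the
two call sites above; the actual types and return values may differ):

    int update_lpi_property(struct domain *d, uint32_t vlpi,
                            struct pending_irq *p);
    void update_lpi_vgic_status(struct vcpu *v, uint32_t vlpi,
                                struct pending_irq *p);

(or the pending_irq-first order for both, whichever matches the rest of the
file).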
+        }
+    /*
+     * Loop over the next gang of pending_irqs until we reach the end of
+     * a (fully populated) tree or the lookup function returns fewer LPIs
+     * than it has been asked for.
+     */
+    } while ( (++vlpi < its->d->arch.vgic.nr_lpis) &&
+              (nr_lpis == ARRAY_SIZE(pirqs)) );
+
+    read_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
+    spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+    return ret;
+}
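For what it's worth, the loop above is the usual radix-tree gang-lookup
pagination idiom. A minimal standalone sketch of just that pattern, for
clarity (walk_pirqs(), visit() and max_index are made-up names for
illustration, not part of this patch):

    /*
     * Visit every pending_irq in the tree, in batches of 16.
     * The index is 64-bit so that the ++index below cannot wrap to 0 after
     * visiting a 32-bit INTID of 0xffffffff and restart the walk; this is
     * the same reason vlpi is declared uint64_t above.
     */
    static void walk_pirqs(struct radix_tree_root *tree, uint64_t max_index,
                           void (*visit)(struct pending_irq *))
    {
        struct pending_irq *batch[16];
        uint64_t index = 0;
        unsigned int n, i;

        do
        {
            n = radix_tree_gang_lookup(tree, (void **)batch, index,
                                       ARRAY_SIZE(batch));
            for ( i = 0; i < n; i++ )
            {
                visit(batch[i]);
                index = batch[i]->irq;   /* remember the last key visited */
            }
            /* A short batch means we reached the end of the tree. */
        } while ( n == ARRAY_SIZE(batch) && ++index <= max_index );
    }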
+
 static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
 {
     uint32_t collid = its_cmd_get_collection(cmdptr);
@@ -822,6 +885,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_INV:
             ret = its_handle_inv(its, command);
             break;
+        case GITS_CMD_INVALL:
+            ret = its_handle_invall(its, command);
+            break;
         case GITS_CMD_MAPC:
             ret = its_handle_mapc(its, command);
             break;
Cheers,
--
Julien Grall