Hi Andre,
On 30/01/17 18:31, Andre Przywara wrote:
For the same reason that allocating a struct irq_desc for each
possible LPI is not an option, having a struct pending_irq for each LPI
is not feasible either. However, we actually only need those while an
interrupt is on a vCPU (or is about to be injected).
Maintain a list of those structs that we can use over the lifecycle of
a guest LPI. We allocate new entries if necessary, but reuse
previously owned entries whenever possible.
I added some locking around this list here, but my gut feeling is that
we don't need any because this is a per-VCPU structure anyway.
If someone could confirm this, I'd be grateful.
Teach the existing VGIC functions to find the right pointer when being
given a virtual LPI number.
Signed-off-by: Andre Przywara <andre.przyw...@arm.com>
---
xen/arch/arm/gic.c | 3 +++
xen/arch/arm/vgic-v3.c | 3 +++
xen/arch/arm/vgic.c | 64 +++++++++++++++++++++++++++++++++++++++++---
xen/include/asm-arm/domain.h | 2 ++
xen/include/asm-arm/vgic.h | 14 ++++++++++
5 files changed, 83 insertions(+), 3 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index a5348f2..bd3c032 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -509,6 +509,9 @@ static void gic_update_one_lr(struct vcpu *v, int i)
                 struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
                 irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             }
+            /* If this was an LPI, mark this struct as available again. */
+            if ( is_lpi(p->irq) )
+                p->irq = 0;
         }
     }
 }
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 1fadb00..b0653c2 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1426,6 +1426,9 @@ static int vgic_v3_vcpu_init(struct vcpu *v)
     if ( v->vcpu_id == last_cpu || (v->vcpu_id == (d->max_vcpus - 1)) )
         v->arch.vgic.flags |= VGIC_V3_RDIST_LAST;
 
+    spin_lock_init(&v->arch.vgic.pending_lpi_list_lock);
+    INIT_LIST_HEAD(&v->arch.vgic.pending_lpi_list);
+
     return 0;
 }
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 364d5f0..7e3440f 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -31,6 +31,8 @@
#include <asm/mmio.h>
#include <asm/gic.h>
#include <asm/vgic.h>
+#include <asm/gic_v3_defs.h>
+#include <asm/gic_v3_its.h>
I really don't want to see gic_v3_* header included in common code.
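If all that vgic.c needs from those headers is is_lpi() (plus the
placeholder priority helper below), maybe that could move to a generic
header instead. A rough sketch, assuming is_lpi() really is the only
dependency, e.g. in asm/irq.h:

/* LPIs are the interrupt IDs from 8192 upwards on any GICv3. */
static inline bool is_lpi(unsigned int irq)
{
    return irq >= 8192;
}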
static inline struct vgic_irq_rank *vgic_get_rank(struct vcpu *v, int rank)
{
@@ -61,7 +63,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
     return vgic_get_rank(v, rank);
 }
 
-static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
+void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
 {
     INIT_LIST_HEAD(&p->inflight);
     INIT_LIST_HEAD(&p->lr_queue);
@@ -244,10 +246,14 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
 static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
 {
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
+    struct vgic_irq_rank *rank;
     unsigned long flags;
     int priority;
 
+    if ( is_lpi(virq) )
+        return vgic_lpi_get_priority(v->domain, virq);
This would benefit from a comment explaining why LPI handling takes a
different path.
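Something along these lines would do; wording up to you, and note that
the reference to the property table is my assumption of where the
priority will eventually come from:

    /*
     * LPIs are not covered by the rank structures, and their priority
     * will live in the (guest allocated) property table instead, so
     * they take a separate path here.
     */
    if ( is_lpi(virq) )
        return vgic_lpi_get_priority(v->domain, virq);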
+
+    rank = vgic_rank_irq(v, virq);
     vgic_lock_rank(v, rank, flags);
     priority = rank->priority[virq & INTERRUPT_RANK_MASK];
     vgic_unlock_rank(v, rank, flags);
@@ -446,13 +452,63 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
     return true;
 }
 
+/*
+ * Holding a struct pending_irq for each possible virtual LPI in each domain
+ * requires too much Xen memory; also, a malicious guest could potentially
+ * spam Xen with LPI map requests. We cannot cover those with (guest allocated)
+ * ITS memory, so we use a dynamic scheme of allocating struct pending_irq's
+ * on demand.
+ */
I am afraid this will not prevent a guest from using too much Xen
memory. Let's imagine the guest maps thousands of LPIs but decides
never to handle them, or handles them slowly. You would allocate a
pending_irq for each LPI and never release the memory.
If we use dynamic allocation, we need a way to limit the number of LPIs
received by a guest to avoid memory exhaustion. The only idea I have is
an artificial limit, but I don't think it is a good one. Any other
ideas?
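To make this concrete, the artificial limit I have in mind would look
something like the sketch below; MAX_GUEST_LPI_STRUCTS and
nr_lpi_structs are names I made up for the example (the counter would
be incremented next to the xzalloc further down), and what a dropped
LPI means for the guest is exactly the part I dislike:

/* Hypothetical per-vCPU cap on dynamically allocated structs. */
#define MAX_GUEST_LPI_STRUCTS 1024

    if ( !empty && v->arch.vgic.nr_lpi_structs >= MAX_GUEST_LPI_STRUCTS )
    {
        spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
        return NULL;    /* drop the LPI rather than exhaust Xen memory */
    }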
+struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int lpi,
+                                   bool allocate)
+{
+    struct lpi_pending_irq *lpi_irq, *empty = NULL;
+
+    spin_lock(&v->arch.vgic.pending_lpi_list_lock);
+    list_for_each_entry(lpi_irq, &v->arch.vgic.pending_lpi_list, entry)
+    {
+        if ( lpi_irq->pirq.irq == lpi )
+        {
+            spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
+            return &lpi_irq->pirq;
+        }
+
+        if ( lpi_irq->pirq.irq == 0 && !empty )
+            empty = lpi_irq;
+    }
+
+    if ( !allocate )
+    {
+        spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
+        return NULL;
+    }
+
+    if ( !empty )
+    {
+        empty = xzalloc(struct lpi_pending_irq);
xzalloc will return NULL if memory is exhausted, so the result needs to
be checked. There is a general lack of error checking within this
series. Any missed error path is a potential target for a guest,
leading to a security issue. Stefano and I already spotted some, which
does not mean we found them all. So before sending the next version,
please go through the series and verify *every* return value.
Also, I can't find the code which releases the LPIs, neither in this
patch nor in the rest of the series. A general rule is to have
allocation and free within the same patch: it makes missing frees much
easier to spot.
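A minimal fix for this particular one could look like (the unlock
mirroring the early returns earlier in the function, so the caller just
sees a failed lookup):

        empty = xzalloc(struct lpi_pending_irq);
        if ( !empty )
        {
            spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
            return NULL;
        }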
+        vgic_init_pending_irq(&empty->pirq, lpi);
+        list_add_tail(&empty->entry, &v->arch.vgic.pending_lpi_list);
+    } else
+    {
+        empty->pirq.status = 0;
+        empty->pirq.irq = lpi;
+    }
+
+    spin_unlock(&v->arch.vgic.pending_lpi_list_lock);
+
+    return &empty->pirq;
+}
+
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
     struct pending_irq *n;
+
Spurious change.
     /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
      * are used for SPIs; the rests are used for per cpu irqs */
     if ( irq < 32 )
         n = &v->arch.vgic.pending_irqs[irq];
+    else if ( is_lpi(irq) )
+        n = lpi_to_pending(v, irq, true);
     else
         n = &v->domain->arch.vgic.pending_irqs[irq - 32];
     return n;
@@ -480,7 +536,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
 void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
 {
     uint8_t priority;
-    struct pending_irq *iter, *n = irq_to_pending(v, virq);
+    struct pending_irq *iter, *n;
     unsigned long flags;
     bool running;
@@ -488,6 +544,8 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
+    n = irq_to_pending(v, virq);
Why did you move this code?
+
     /* vcpu offline */
     if ( test_bit(_VPF_down, &v->pause_flags) )
     {
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 00b9c1a..f44a84b 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -257,6 +257,8 @@ struct arch_vcpu
         paddr_t rdist_base;
 #define VGIC_V3_RDIST_LAST (1 << 0) /* last vCPU of the rdist */
         uint8_t flags;
+        struct list_head pending_lpi_list;
+        spinlock_t pending_lpi_list_lock; /* protects the pending_lpi_list */
     } vgic;
 
     /* Timer registers */
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 672f649..03d4d2e 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -83,6 +83,12 @@ struct pending_irq
     struct list_head lr_queue;
 };
 
+struct lpi_pending_irq
+{
+    struct list_head entry;
+    struct pending_irq pirq;
+};
+
 #define NR_INTERRUPT_PER_RANK  32
 #define INTERRUPT_RANK_MASK (NR_INTERRUPT_PER_RANK - 1)
@@ -296,13 +302,21 @@ extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
extern void vgic_clear_pending_irqs(struct vcpu *v);
+extern void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq);
extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
extern struct pending_irq *spi_to_pending(struct domain *d, unsigned int irq);
+extern struct pending_irq *lpi_to_pending(struct vcpu *v, unsigned int irq,
+                                          bool allocate);
 extern struct vgic_irq_rank *vgic_rank_offset(struct vcpu *v, int b, int n,
                                               int s);
extern struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq);
extern bool vgic_emulate(struct cpu_user_regs *regs, union hsr hsr);
extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
+/* placeholder function until the property table gets introduced */
+static inline int vgic_lpi_get_priority(struct domain *d, uint32_t vlpi)
+{
+    return 0xa;
+}
To be fair, you could avoid this function altogether by re-ordering the
patches. As suggested earlier, I would introduce some bits of the vITS
first; that would also make the series easier to read.
extern void register_vgic_ops(struct domain *d, const struct vgic_ops *ops);
int vgic_v2_init(struct domain *d, int *mmio_count);
int vgic_v3_init(struct domain *d, int *mmio_count);
Cheers,
--
Julien Grall