... instead of artificially masking the timer interrupt in the timer state and relying on the guest to unmask (which it isn't required to do per the h/w spec, although Linux does).
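As an aside (not part of this patch): for a Linux-style guest the unmask happens
as a side effect of programming the next timer event. A minimal AArch64-flavoured
sketch of that guest behaviour, using only the architectural CNTV_* registers --
the helper names here are invented for illustration:

#include <stdint.h>

#define CNTx_CTL_ENABLE (1u << 0)
#define CNTx_CTL_IMASK  (1u << 1)

static inline void write_cntv_ctl(uint64_t val)
{
    asm volatile("msr cntv_ctl_el0, %0" :: "r"(val));
}

static inline void write_cntv_cval(uint64_t cval)
{
    asm volatile("msr cntv_cval_el0, %0" :: "r"(cval));
}

/*
 * Program the next expiry; writing ENABLE with IMASK clear unmasks the
 * timer output as a side effect -- nothing in the architecture obliges
 * a guest to ever do this.
 */
static void guest_set_next_event(uint64_t cval)
{
    write_cntv_cval(cval);
    write_cntv_ctl(CNTx_CTL_ENABLE);
}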
By using the newly added hwppi save/restore functionality we make use of the
GICD I[SC]ACTIVER registers to save and restore the active state of the
interrupt, which prevents the nested invocations which the current masking
works around.

Signed-off-by: Ian Campbell <ian.campb...@citrix.com>
---
v2: Rebased, in particular over Julien's passthrough stuff which reworked a
    bunch of IRQ related stuff. Also largely rewritten since precursor
    patches now lay very different groundwork.
---
 xen/arch/arm/time.c          | 26 ++------------------------
 xen/arch/arm/vtimer.c        | 41 +++++++++++++++++++++++++++++++++++++----
 xen/include/asm-arm/domain.h |  1 +
 3 files changed, 40 insertions(+), 28 deletions(-)

diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 5ded30c..2a1cdba 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -181,28 +181,6 @@ static void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
     }
 }
 
-static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
-{
-    /*
-     * Edge-triggered interrupts can be used for the virtual timer. Even
-     * if the timer output signal is masked in the context switch, the
-     * GIC will keep track that of any interrupts raised while IRQS are
-     * disabled. As soon as IRQs are re-enabled, the virtual interrupt
-     * will be injected to Xen.
-     *
-     * If an IDLE vCPU was scheduled next then we should ignore the
-     * interrupt.
-     */
-    if ( unlikely(is_idle_vcpu(current)) )
-        return;
-
-    perfc_incr(virt_timer_irqs);
-
-    current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
-    WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
-    vgic_vcpu_inject_irq(current, current->arch.virt_timer.irq);
-}
-
 /*
  * Arch timer interrupt really ought to be level triggered, since the
  * design of the timer/comparator mechanism is based around that
@@ -242,8 +220,8 @@ void __cpuinit init_timer_interrupt(void)
 
     request_irq(timer_irq[TIMER_HYP_PPI], 0, timer_interrupt,
                 "hyptimer", NULL);
-    request_irq(timer_irq[TIMER_VIRT_PPI], 0, vtimer_interrupt,
-                "virtimer", NULL);
+    route_hwppi_to_current_vcpu(timer_irq[TIMER_VIRT_PPI], "virtimer");
+
     request_irq(timer_irq[TIMER_PHYS_NONSECURE_PPI], 0, timer_interrupt,
                 "phytimer", NULL);
 
diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c
index 1418092..82e2b88 100644
--- a/xen/arch/arm/vtimer.c
+++ b/xen/arch/arm/vtimer.c
@@ -55,9 +55,19 @@ static void phys_timer_expired(void *data)
 static void virt_timer_expired(void *data)
 {
     struct vtimer *t = data;
-    t->ctl |= CNTx_CTL_MASK;
-    vgic_vcpu_inject_irq(t->v, t->irq);
-    perfc_incr(vtimer_virt_inject);
+    t->ctl |= CNTx_CTL_PENDING;
+    if ( !(t->ctl & CNTx_CTL_MASK) )
+    {
+        /*
+         * An edge triggered interrupt should now be pending. Since
+         * this timer can never expire while the domain is scheduled
+         * we know that the gic_restore_hwppi in virt_timer_restore
+         * will cause the real hwppi to occur and be routed.
+         */
+        gic_hwppi_set_pending(&t->ppi_state);
+        vcpu_unblock(t->v);
+        perfc_incr(vtimer_virt_inject);
+    }
 }
 
 int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
@@ -96,9 +106,12 @@ int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
 
 int vcpu_vtimer_init(struct vcpu *v)
 {
+    struct pending_irq *p;
     struct vtimer *t = &v->arch.phys_timer;
     bool_t d0 = is_hardware_domain(v->domain);
 
+    const unsigned host_vtimer_irq_ppi = timer_get_irq(TIMER_VIRT_PPI);
+
     /*
      * Hardware domain uses the hardware interrupts, guests get the virtual
      * platform.
@@ -116,10 +129,16 @@ int vcpu_vtimer_init(struct vcpu *v)
     init_timer(&t->timer, virt_timer_expired, t, v->processor);
     t->ctl = 0;
     t->irq = d0
-        ? timer_get_irq(TIMER_VIRT_PPI)
+        ? host_vtimer_irq_ppi
         : GUEST_TIMER_VIRT_PPI;
     t->v = v;
 
+    p = irq_to_pending(v, t->irq);
+    p->irq = t->irq;
+
+    gic_hwppi_state_init(&v->arch.virt_timer.ppi_state,
+                         host_vtimer_irq_ppi);
+
     v->arch.vtimer_initialized = 1;
 
     return 0;
@@ -147,6 +166,15 @@ int virt_timer_save(struct vcpu *v)
         set_timer(&v->arch.virt_timer.timer, ticks_to_ns(v->arch.virt_timer.cval +
                   v->domain->arch.virt_timer_base.offset - boot_count));
     }
+
+    /*
+     * Since the vtimer irq is a PPI we don't need to worry about
+     * racing against it becoming active while we are saving the
+     * state, since that requires the guest to be reading the IAR.
+     */
+    gic_save_and_mask_hwppi(v, v->arch.virt_timer.irq,
+                            &v->arch.virt_timer.ppi_state);
+
     return 0;
 }
 
@@ -161,6 +189,11 @@ int virt_timer_restore(struct vcpu *v)
     WRITE_SYSREG64(v->domain->arch.virt_timer_base.offset, CNTVOFF_EL2);
     WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0);
     WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0);
+
+    gic_restore_hwppi(v,
+                      v->arch.virt_timer.irq,
+                      &v->arch.virt_timer.ppi_state);
+
     return 0;
 }
 
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 550e28b..aff21dd 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -51,6 +51,7 @@ struct vtimer {
     struct timer timer;
     uint32_t ctl;
     uint64_t cval;
+    struct hwppi_state ppi_state;
 };
 
 struct arch_domain
-- 
2.1.4
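For reference, since the hwppi helpers used above (gic_save_and_mask_hwppi(),
gic_restore_hwppi(), gic_hwppi_set_pending(), route_hwppi_to_current_vcpu())
come from earlier patches in this series and are not shown in this mail: below
is a rough sketch of what saving and restoring a PPI's active state via the
GICD I[SC]ACTIVER registers amounts to. It is an illustration only, not the
actual implementation -- gicd_base, readl()/writel() and the layout of struct
hwppi_state are assumptions made for the example.

#include <stdbool.h>
#include <stdint.h>

#define GICD_ISENABLER 0x100 /* set-enable, one bit per interrupt */
#define GICD_ICENABLER 0x180 /* clear-enable */
#define GICD_ISACTIVER 0x300 /* set-active */
#define GICD_ICACTIVER 0x380 /* clear-active */

extern uint8_t *gicd_base;                    /* assumed: mapped GICD */
extern uint32_t readl(const volatile void *); /* assumed MMIO helpers */
extern void writel(uint32_t, volatile void *);

struct hwppi_state {
    bool active;   /* was the PPI active (in service) for this vCPU? */
    bool enabled;  /* was it enabled at the distributor? */
};

static volatile void *gicd_reg(unsigned int offset, unsigned int irq)
{
    return gicd_base + offset + (irq / 32) * 4;
}

/* Capture the PPI's state, then quiesce it while the vCPU is descheduled. */
static void hwppi_save_and_mask(unsigned int irq, struct hwppi_state *s)
{
    const uint32_t bit = 1u << (irq % 32);

    s->active  = readl(gicd_reg(GICD_ISACTIVER, irq)) & bit;
    s->enabled = readl(gicd_reg(GICD_ISENABLER, irq)) & bit;

    writel(bit, gicd_reg(GICD_ICACTIVER, irq)); /* deactivate */
    writel(bit, gicd_reg(GICD_ICENABLER, irq)); /* disable */
}

/* Put the saved state back when the vCPU is scheduled in again. */
static void hwppi_restore(unsigned int irq, const struct hwppi_state *s)
{
    const uint32_t bit = 1u << (irq % 32);

    if ( s->active )
        writel(bit, gicd_reg(GICD_ISACTIVER, irq));
    if ( s->enabled )
        writel(bit, gicd_reg(GICD_ISENABLER, irq));
}

Restoring the active bit is what lets a vCPU that was descheduled mid-handling
resume without taking a nested instance of the same PPI, which is the behaviour
the commit message relies on instead of the old CNTx_CTL_MASK trick.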