Currently, a blocked vCPU is added to the PI blocking list if its domain has assigned devices. However, some blocked vCPUs will never be woken up by the wakeup interrupt generated by VT-d hardware; they are woken up by IPIs or by interrupts from emulated devices instead. This patch therefore avoids adding such vCPUs to the PI blocking list.
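
To illustrate the intended check, here is a small standalone sketch (not part of the patch, and deliberately not using Xen internals): struct toy_vcpu and should_join_pi_blocking_list() are illustrative stand-ins for the pi_ops.in_use() hook and the _VPF_blocked test that the real vmx_vcpu_block() uses.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the decision vmx_vcpu_block() makes after this patch.
 * The two fields stand in for pi_ops.in_use(v) and the _VPF_blocked
 * bit in v->pause_flags; they are not real Xen structures.
 */
struct toy_vcpu {
    bool blocked;        /* vCPU has _VPF_blocked set */
    bool pi_desc_in_use; /* some IRTE refers to this vCPU's PI descriptor */
};

static bool should_join_pi_blocking_list(const struct toy_vcpu *v)
{
    /*
     * No IRTE refers to the PI descriptor, so VT-d hardware can never
     * generate a wakeup interrupt for this vCPU; putting it on the
     * per-pCPU PI blocking list would be pointless.
     */
    if ( !v->pi_desc_in_use )
        return false;

    /* Only a vCPU that is actually blocked belongs on the list. */
    return v->blocked;
}

int main(void)
{
    struct toy_vcpu ipi_only = { .blocked = true, .pi_desc_in_use = false };
    struct toy_vcpu assigned = { .blocked = true, .pi_desc_in_use = true };

    printf("ipi_only joins list: %d\n",
           should_join_pi_blocking_list(&ipi_only)); /* 0 */
    printf("assigned joins list: %d\n",
           should_join_pi_blocking_list(&assigned)); /* 1 */

    return 0;
}

The actual patch expresses the same two early-return checks directly in vmx_vcpu_block(), as shown in the diff below.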
If a vCPU is blocked before it gets bound to an IRTE, it needs to be added to the blocking list at the time the binding is established. In that case, arch_vcpu_block() may be called from a vCPU other than the target, which the current implementation cannot handle. This patch expands arch_vcpu_block(), removing some restrictions that were expressed as assertions and handling the target vCPU according to its state and its PI blocking list lock (v->arch.hvm_vmx.pi_blocking.lock).

Signed-off-by: Chao Gao <chao....@intel.com>
---
 xen/arch/x86/hvm/vmx/vmx.c             | 21 ++++++++++++++-------
 xen/drivers/passthrough/vtd/intremap.c | 19 +++++++++++++++++++
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 76cb421..ae16f39 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -125,6 +125,14 @@ static void vmx_vcpu_block(struct vcpu *v)
     struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
     spinlock_t *pi_blocking_list_lock;
 
+    /* If no IRTE refers to 'pi_desc', no further operation needs */
+    if ( v->domain->arch.hvm_domain.pi_ops.in_use &&
+         !v->domain->arch.hvm_domain.pi_ops.in_use(v) )
+        return;
+
+    if ( !test_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
     /*
      * After pCPU goes down, the per-cpu PI blocking list is cleared.
      * To make sure the parameter vCPU is added to the chosen pCPU's
@@ -144,13 +152,8 @@ static void vmx_vcpu_block(struct vcpu *v)
 
     old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
                        pi_blocking_list_lock);
-
-    /*
-     * 'v->arch.hvm_vmx.pi_blocking.lock' should be NULL before
-     * being assigned to a new value, since the vCPU is currently
-     * running and it cannot be on any blocking list.
-     */
-    ASSERT(old_lock == NULL);
+    if ( old_lock )
+        goto out;
 
     atomic_inc(&per_cpu(vmx_pi_blocking, dest_cpu).counter);
     HVMTRACE_4D(VT_D_PI_BLOCK, v->domain->domain_id, v->vcpu_id, dest_cpu,
@@ -171,6 +174,10 @@ static void vmx_vcpu_block(struct vcpu *v)
     write_atomic(&pi_desc->nv, pi_wakeup_vector);
     write_atomic(&pi_desc->ndst, (x2apic_enabled ?
                 dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK)));
+    return;
+
+ out:
+    spin_unlock_irqrestore(pi_blocking_list_lock, flags);
 }
 
 static void vmx_pi_switch_from(struct vcpu *v)
diff --git a/xen/drivers/passthrough/vtd/intremap.c b/xen/drivers/passthrough/vtd/intremap.c
index 99f1cce..c43cfa2 100644
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -30,6 +30,7 @@
 #include "extern.h"
 
 #include <asm/apic.h>
+#include <asm/hvm/hvm.h>
 #include <asm/io_apic.h>
 
 #define nr_ioapic_entries(i)  nr_ioapic_entries[i]
@@ -622,6 +623,20 @@ static void pi_put_ref(struct pi_desc *pi_desc)
     v->domain->arch.hvm_domain.pi_ops.put_ref(v);
 }
 
+static bool pi_in_use(struct pi_desc *pi_desc)
+{
+    struct vcpu *v;
+
+    if ( !pi_desc )
+        return 0;
+
+    v = pi_desc_to_vcpu(pi_desc);
+    ASSERT(is_hvm_domain(v->domain));
+    if ( v->domain->arch.hvm_domain.pi_ops.in_use )
+        return v->domain->arch.hvm_domain.pi_ops.in_use(v);
+    return 0;
+}
+
 static int msi_msg_to_remap_entry(
     struct iommu *iommu, struct pci_dev *pdev,
     struct msi_desc *msi_desc, struct msi_msg *msg)
@@ -996,6 +1011,7 @@ int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
     struct msi_desc *msi_desc;
     struct pi_desc *old_pi_desc;
     int rc;
+    bool first_ref;
 
     desc = pirq_spin_lock_irq_desc(pirq, NULL);
     if ( !desc )
@@ -1009,7 +1025,10 @@ int pi_update_irte(struct pi_desc *pi_desc, const struct pirq *pirq,
     }
 
     old_pi_desc = msi_desc->pi_desc;
+    first_ref = !pi_in_use(pi_desc);
     pi_get_ref(pi_desc);
+    if ( pi_desc && first_ref )
+        arch_vcpu_block(pi_desc_to_vcpu(pi_desc));
 
     msi_desc->pi_desc = pi_desc;
     msi_desc->gvec = gvec;
-- 
1.8.3.1

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel