Hi Quan,
On 16/09/2015 14:24, Quan Xu wrote:
To destroy a virtual machine, schedule and wait on a waitqueue
until the Device-TLB flush is completed.
Signed-off-by: Quan Xu <quan...@intel.com>
---
xen/common/domain.c | 10 ++++++++++
xen/drivers/passthrough/vtd/iommu.c | 9 +++++++++
xen/include/xen/hvm/iommu.h | 6 ++++++
3 files changed, 25 insertions(+)
Same remarks as for the previous patches.
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 1f62e3b..8ccc1a5 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -867,6 +867,16 @@ void domain_destroy(struct domain *d)
rcu_assign_pointer(*pd, d->next_in_hashbucket);
spin_unlock(&domlist_update_lock);
+#ifdef HAS_PASSTHROUGH
+ /*
+ * If the Device-TLB flush is still not completed, schedule
+ * and wait on a waitqueue until the Device-TLB flush is
+ * completed.
+ */
+ if ( need_iommu(d) && QI_FLUSHING(d) )
+ wait_for_qi_flushing(d);
+#endif
+
/* Schedule RCU asynchronous completion of domain destroy. */
call_rcu(&d->rcu, complete_domain_destroy);
}
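For readers following the series: the wait added above pairs with the
wake_up_all() in the QI completion path further down. A minimal sketch of
the pattern, assuming Xen's xen/wait.h API (the example_* names are
hypothetical, not part of the patch):

    #include <xen/types.h>
    #include <xen/wait.h>

    static struct waitqueue_head example_wq;   /* hypothetical */
    static bool_t example_flushing;            /* hypothetical condition */

    void example_init(void)
    {
        init_waitqueue_head(&example_wq);  /* must run before any waiter */
    }

    void example_wait(void)
    {
        /* Blocks the current vCPU until the condition evaluates true. */
        wait_event(example_wq, !example_flushing);
    }

    void example_complete(void)
    {
        example_flushing = 0;       /* make the condition true first... */
        wake_up_all(&example_wq);   /* ...then wake every blocked vCPU */
    }

Note that wait_event() takes the waitqueue head by name (it takes the
address internally), while wake_up_all() takes a pointer, which matches
the usage in the hunks below.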
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1297dea..3d98fea 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1070,6 +1070,11 @@ static hw_irq_controller dma_msi_type = {
};
/* IOMMU Queued Invalidation(QI). */
+void wait_for_qi_flushing(struct domain *d)
+{
+ wait_event(qi_wq(d), !QI_FLUSHING(d));
+}
+
static void qi_clear_iwc(struct iommu *iommu)
{
unsigned long flags;
@@ -1188,6 +1193,7 @@ scan_again:
}
spin_unlock(&qi_page_lock(d));
QI_FLUSHING(d) = 0;
+ wake_up_all(&qi_wq(d));
}
rcu_unlock_domain(d);
}
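One note on the ordering here: QI_FLUSHING(d) is cleared before
wake_up_all(), which matters because wait_event() re-evaluates its
condition after waking; if the flag were still set, a woken vCPU would
simply go back to sleep with no further wake-up coming.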
@@ -1494,6 +1500,7 @@ static int intel_iommu_domain_init(struct domain *d)
hd->arch.agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
INIT_PAGE_LIST_HEAD(&qi_hold_page_list(d));
spin_lock_init(&qi_page_lock(d));
+ init_waitqueue_head(&qi_wq(d));
return 0;
}
@@ -1925,6 +1932,8 @@ static void iommu_domain_teardown(struct domain *d)
if ( list_empty(&acpi_drhd_units) )
return;
+ destroy_waitqueue_head(&qi_wq(d));
+
list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list )
{
list_del(&mrmrr->list);
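Collecting the qi_wq(d) call sites from the hunks above, the intended
lifecycle of the new waitqueue appears to be (a summary, not new code):

    init_waitqueue_head(&qi_wq(d));        /* intel_iommu_domain_init() */
    wait_event(qi_wq(d), !QI_FLUSHING(d)); /* domain_destroy() */
    wake_up_all(&qi_wq(d));                /* QI completion scan path */
    destroy_waitqueue_head(&qi_wq(d));     /* iommu_domain_teardown() */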
diff --git a/xen/include/xen/hvm/iommu.h b/xen/include/xen/hvm/iommu.h
index 5dc0033..f661c8c 100644
--- a/xen/include/xen/hvm/iommu.h
+++ b/xen/include/xen/hvm/iommu.h
@@ -20,6 +20,7 @@
#define __XEN_HVM_IOMMU_H__
#include <xen/iommu.h>
+#include <xen/wait.h>
#include <xen/list.h>
#include <asm/hvm/iommu.h>
@@ -56,12 +57,15 @@ struct hvm_iommu {
struct page_list_head qi_hold_page_list;
spinlock_t qi_lock;
+ struct waitqueue_head qi_wq;
+
/* Features supported by the IOMMU */
DECLARE_BITMAP(features, IOMMU_FEAT_count);
};
void do_qi_flushing(struct domain *d);
void qi_hold_page(struct domain *d, struct page_info *pg);
+void wait_for_qi_flushing(struct domain *d);
#define iommu_set_feature(d, f) set_bit((f), domain_hvm_iommu(d)->features)
#define iommu_clear_feature(d, f) clear_bit((f), domain_hvm_iommu(d)->features)
@@ -76,5 +80,7 @@ void qi_hold_page(struct domain *d, struct page_info *pg);
#define qi_hold_page_list(d) \
(d->arch.hvm_domain.hvm_iommu.qi_hold_page_list)
#define qi_page_lock(d) \
(d->arch.hvm_domain.hvm_iommu.qi_lock)
+#define qi_wq(d) \
+ (d->arch.hvm_domain.hvm_iommu.qi_wq)
#endif /* __XEN_HVM_IOMMU_H__ */
Regards,
--
Julien Grall