This new function correctly initializes the IOMMU page table for the
given domain.

Also use it in iommu_assign_dt_device, even though the current IOMMU
implementation on ARM shares the P2M with the processor.
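
For illustration only, a caller that needs the IOMMU page table set up
before wiring a device would follow the pattern below (example_assign and
the placeholder step are made up for this sketch and are not part of the
patch):

    int example_assign(struct domain *d)
    {
        int rc;

        /* Ensure the IOMMU page table is populated before any mapping. */
        rc = iommu_construct(d);
        if ( rc )
            return rc;

        /* Device-specific assignment would go here. */
        return 0;
    }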

Signed-off-by: Julien Grall <julien.gr...@linaro.org>
Cc: Jan Beulich <jbeul...@suse.com>

---
    Changes in v3:
        - The ASSERT in iommu_construct was redundant with the if ()
        - Remove d->need_iommu = 1 in assign_device as it's already
          done by iommu_construct.
        - Simplify the code in the caller of iommu_construct

    Changes in v2:
        - Add missing Signed-off-by
        - Rename iommu_buildup to iommu_construct
---
 xen/drivers/passthrough/arm/iommu.c   |  6 ++++++
 xen/drivers/passthrough/device_tree.c |  4 ++++
 xen/drivers/passthrough/iommu.c       | 19 +++++++++++++++++++
 xen/drivers/passthrough/pci.c         | 15 ++++-----------
 xen/include/xen/iommu.h               |  2 ++
 5 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/xen/drivers/passthrough/arm/iommu.c b/xen/drivers/passthrough/arm/iommu.c
index 3e9303a..5870aef 100644
--- a/xen/drivers/passthrough/arm/iommu.c
+++ b/xen/drivers/passthrough/arm/iommu.c
@@ -68,3 +68,9 @@ void arch_iommu_domain_destroy(struct domain *d)
 {
     iommu_dt_domain_destroy(d);
 }
+
+int arch_iommu_populate_page_table(struct domain *d)
+{
+    /* The IOMMU shares the p2m with the CPU */
+    return -ENOSYS;
+}
diff --git a/xen/drivers/passthrough/device_tree.c b/xen/drivers/passthrough/device_tree.c
index 377d41d..88e496e 100644
--- a/xen/drivers/passthrough/device_tree.c
+++ b/xen/drivers/passthrough/device_tree.c
@@ -41,6 +41,10 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev)
     if ( !list_empty(&dev->domain_list) )
         goto fail;
 
+    rc = iommu_construct(d);
+    if ( rc )
+        goto fail;
+
     rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev));
 
     if ( rc )
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index cc12735..8915244 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -187,6 +187,25 @@ void iommu_teardown(struct domain *d)
     tasklet_schedule(&iommu_pt_cleanup_tasklet);
 }
 
+int iommu_construct(struct domain *d)
+{
+    int rc = 0;
+
+    if ( need_iommu(d) > 0 )
+        return 0;
+
+    if ( !iommu_use_hap_pt(d) )
+    {
+        rc = arch_iommu_populate_page_table(d);
+        if ( rc )
+            return rc;
+    }
+
+    d->need_iommu = 1;
+
+    return rc;
+}
+
 void iommu_domain_destroy(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 43ce5dc..9a47a37 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1355,18 +1355,11 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
     if ( !spin_trylock(&pcidevs_lock) )
         return -ERESTART;
 
-    if ( need_iommu(d) <= 0 )
+    rc = iommu_construct(d);
+    if ( rc )
     {
-        if ( !iommu_use_hap_pt(d) )
-        {
-            rc = arch_iommu_populate_page_table(d);
-            if ( rc )
-            {
-                spin_unlock(&pcidevs_lock);
-                return rc;
-            }
-        }
-        d->need_iommu = 1;
+        spin_unlock(&pcidevs_lock);
+        return rc;
     }
 
     pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index d0f99ef..c146ee4 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -65,6 +65,8 @@ int arch_iommu_domain_init(struct domain *d);
 int arch_iommu_populate_page_table(struct domain *d);
 void arch_iommu_check_autotranslated_hwdom(struct domain *d);
 
+int iommu_construct(struct domain *d);
+
 /* Function used internally, use iommu_domain_destroy */
 void iommu_teardown(struct domain *d);
 
-- 
2.1.4

