Once the group enters 'owned' mode it can never be assigned back to the
default_domain or to a NULL domain. It must always be actively assigned to
a current domain. If the caller hasn't provided a domain then the core
must provide an explicit DMA blocking domain that has no DMA map.
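As a rough sketch of that rule (field names follow this patch; the real
logic is __iommu_group_attach_core_domain() in the diff below, and the
helper name here is made up):

    /* Which core-owned domain a group falls back to */
    static struct iommu_domain *example_core_domain(struct iommu_group *group)
    {
            /* An owned group must never go back to default_domain or NULL */
            if (group->owner)
                    return group->blocking_domain;
            return group->default_domain;
    }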

Lazily create a group-global blocking DMA domain when
iommu_group_claim_dma_owner() is first called and immediately assign the
group to it. This ensures that DMA is immediately fully isolated on all
IOMMU drivers.

If the user attaches/detaches while owned then detach will set the group
back to the blocking domain.
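For illustration, the caller-visible flow after this patch (VFIO-style,
simplified, error handling omitted; 'owner' and 'domain' are placeholders):

    iommu_group_claim_dma_owner(group, owner); /* group -> blocking domain */
    iommu_attach_group(domain, group);         /* group -> caller's domain */
    iommu_detach_group(domain, group);         /* group -> blocking domain */
    iommu_domain_free(domain);                 /* safe, no stale reference */
    iommu_group_release_dma_owner(group);      /* group -> default domain  */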

Slightly reorganize the call chains so that
__iommu_group_attach_core_domain() is the function that removes any
caller-configured domain and sets the group back to a core-owned domain
with an appropriate lifetime.

__iommu_group_attach_domain() is the worker function that can change the
domain assigned to a group to any target domain, including NULL.
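For reference, the reorganized call chain as I read the diff below:

    iommu_detach_device() / iommu_detach_group() /
    iommu_group_release_dma_owner()
        -> __iommu_group_attach_core_domain()   (picks default or blocking)
            -> __iommu_group_attach_domain()    (performs the switch)

    iommu_group_claim_dma_owner()
        -> __iommu_group_attach_domain(group, group->blocking_domain)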

Add comments clarifying how NULL domains, detach_dev() and the
default_domain interact, based on Robin's remarks.

This fixes an oops with VFIO and SMMUv3 because VFIO will call
iommu_detach_group() and then immediately iommu_domain_free(), but
SMMUv3 has no way to know that the domain it is holding a pointer to
has been freed. Now the iommu_detach_group() will assign the blocking
domain and SMMUv3 will no longer hold a stale domain reference.
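Concretely, the failing VFIO-style sequence prior to this patch
(simplified sketch):

    iommu_group_claim_dma_owner(group, owner);
    iommu_attach_group(domain, group);
    /* ... DMA use ... */
    iommu_detach_group(domain, group); /* SMMUv3 still holds its domain pointer */
    iommu_domain_free(domain);         /* that pointer now dangles -> oops */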

Fixes: 1ea2a07a532b ("iommu: Add DMA ownership management interfaces")
Reported-by: Qian Cai <quic_qian...@quicinc.com>
Signed-off-by: Robin Murphy <robin.mur...@arm.com>
Signed-off-by: Jason Gunthorpe <j...@nvidia.com>
---
 drivers/iommu/iommu.c | 112 +++++++++++++++++++++++++++++++-----------
 1 file changed, 82 insertions(+), 30 deletions(-)

This is based on Robin's draft here:

https://lore.kernel.org/linux-iommu/18831161-473f-e04f-4a81-1c7062ad1...@arm.com/

With some rework. I re-organized the call chains instead of introducing
iommu_group_user_attached(), fixed a recursive locking issue in
iommu_group_get_purgatory(), and wrote a proper commit message.

Still only compile tested, so RFCish.

Nicolin/Lu? What do you think, can you check it?

Jason

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0c42ece2585406..94d99768023c94 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -44,6 +44,7 @@ struct iommu_group {
        char *name;
        int id;
        struct iommu_domain *default_domain;
+       struct iommu_domain *blocking_domain;
        struct iommu_domain *domain;
        struct list_head entry;
        unsigned int owner_cnt;
@@ -82,8 +83,7 @@ static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
-                                struct iommu_group *group);
+static void __iommu_group_attach_core_domain(struct iommu_group *group);
 static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev);
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
@@ -596,6 +596,8 @@ static void iommu_group_release(struct kobject *kobj)
 
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
+       if (group->blocking_domain)
+               iommu_domain_free(group->blocking_domain);
 
        kfree(group->name);
        kfree(group);
@@ -1979,12 +1981,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
                return;
 
        mutex_lock(&group->mutex);
-       if (iommu_group_device_count(group) != 1) {
-               WARN_ON(1);
+       if (WARN_ON(domain != group->domain) ||
+           WARN_ON(iommu_group_device_count(group) != 1))
                goto out_unlock;
-       }
-
-       __iommu_detach_group(domain, group);
+       __iommu_group_attach_core_domain(group);
 
 out_unlock:
        mutex_unlock(&group->mutex);
@@ -2072,38 +2072,66 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
        return 0;
 }
 
-static void __iommu_detach_group(struct iommu_domain *domain,
-                                struct iommu_group *group)
+static int __iommu_group_attach_domain(struct iommu_group *group,
+                                      struct iommu_domain *new_domain)
 {
        int ret;
 
+       if (group->domain == new_domain)
+               return 0;
+
        /*
-        * If the group has been claimed already, do not re-attach the default
-        * domain.
+        * A NULL domain means to call the detach_dev() op. New drivers should
+        * use an IOMMU_DOMAIN_IDENTITY domain instead of a NULL default_domain
+        * and detach_dev().
         */
-       if (!group->default_domain || group->owner) {
-               __iommu_group_for_each_dev(group, domain,
+       if (!new_domain) {
+               WARN_ON(!group->domain->ops->detach_dev);
+               __iommu_group_for_each_dev(group, group->domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
-               return;
+               return 0;
        }
 
-       if (group->domain == group->default_domain)
-               return;
-
-       /* Detach by re-attaching to the default domain */
+       /*
+        * New drivers do not implement detach_dev, so changing the domain is
+        * done by calling attach on the new domain. Drivers should implement
+        * this so that DMA is always translated by either the new, old, or a
+        * blocking domain. DMA should never become untranslated.
+        *
+        * Note that this is called in error unwind paths, so attaching to a
+        * domain that has already been attached cannot fail.
+        */
-       ret = __iommu_group_for_each_dev(group, group->default_domain,
+       ret = __iommu_group_for_each_dev(group, new_domain,
                                         iommu_group_do_attach_device);
-       if (ret != 0)
-               WARN_ON(1);
+       if (ret)
+               return ret;
+       group->domain = new_domain;
+       return 0;
+}
+
+/*
+ * Put the group's domain back to the appropriate core-owned domain - either the
+ * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
+ */
+static void __iommu_group_attach_core_domain(struct iommu_group *group)
+{
+       struct iommu_domain *new_domain;
+       int ret;
+
+       if (group->owner)
+               new_domain = group->blocking_domain;
        else
-               group->domain = group->default_domain;
+               new_domain = group->default_domain;
+
+       ret = __iommu_group_attach_domain(group, new_domain);
+       WARN(ret, "iommu driver failed to attach the default/blocking domain");
 }
 
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 {
        mutex_lock(&group->mutex);
-       __iommu_detach_group(domain, group);
+       __iommu_group_attach_core_domain(group);
        mutex_unlock(&group->mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
@@ -3088,6 +3116,29 @@ void iommu_device_unuse_default_domain(struct device *dev)
        iommu_group_put(group);
 }
 
+static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
+{
+       struct group_device *dev =
+               list_first_entry(&group->devices, struct group_device, list);
+
+       if (group->blocking_domain)
+               return 0;
+
+       group->blocking_domain =
+               __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+       if (!group->blocking_domain) {
+               /*
+                * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
+                * create an empty domain instead.
+                */
+               group->blocking_domain = __iommu_domain_alloc(
+                       dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+               if (!group->blocking_domain)
+                       return -EINVAL;
+       }
+       return 0;
+}
+
 /**
  * iommu_group_claim_dma_owner() - Set DMA ownership of a group
  * @group: The group.
@@ -3111,9 +3162,15 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
                        goto unlock_out;
                }
 
+               ret = __iommu_group_alloc_blocking_domain(group);
+               if (ret)
+                       goto unlock_out;
+
+               ret = __iommu_group_attach_domain(group,
+                                                 group->blocking_domain);
+               if (ret)
+                       goto unlock_out;
                group->owner = owner;
-               if (group->domain)
-                       __iommu_detach_group(group->domain, group);
        }
 
        group->owner_cnt++;
@@ -3137,13 +3194,8 @@ void iommu_group_release_dma_owner(struct iommu_group *group)
                goto unlock_out;
 
        group->owner_cnt = 0;
-       /*
-        * The UNMANAGED domain should be detached before all USER
-        * owners have been released.
-        */
-       if (!WARN_ON(group->domain) && group->default_domain)
-               __iommu_attach_group(group->default_domain, group);
        group->owner = NULL;
+       __iommu_group_attach_core_domain(group);
 unlock_out:
        mutex_unlock(&group->mutex);
 }

base-commit: dc7afe17339c2f5de8c377aaa0b976139a19e158
-- 
2.36.0
