The iommu_get_domain_for_dev() helper will be reworked to check a per-gdev
flag, so it will need to hold the group->mutex. This will give trouble to
existing attach_dev callback functions that call the helper for currently
attached old domains, since group->mutex is already held in an attach_dev
context.

To address this, step one is to pass in the attached "old" domain pointer
to the attach_dev op, similar to set_dev_pasid op.

However, the release_dev op is tricky in the iommu core, because it could
be invoked when the group isn't allocated, i.e. no way of guaranteeing the
group->mutex being held. Thus, it would not be able to do any attachment
in the release_dev callback function, arm_smmu_release_device() here.

Add a release_domain, moving the attach from arm_smmu_release_device() to
the iommu_deinit_device() in the core, so that arm_smmu_release_device()
will not need to worry about the group->mutex.

Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 34 ++++++++++++++++-----
 1 file changed, 26 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c 
b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5968043ac8023..1a21d1a2dd454 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3291,6 +3291,31 @@ static struct iommu_domain arm_smmu_blocked_domain = {
        .ops = &arm_smmu_blocked_ops,
 };
 
+static int arm_smmu_attach_dev_release(struct iommu_domain *domain,
+                                      struct device *dev)
+{
+       struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+       WARN_ON(master->iopf_refcount);
+
+       /* Put the STE back to what arm_smmu_init_strtab() sets */
+       if (dev->iommu->require_direct)
+               arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+       else
+               arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
+
+       return 0;
+}
+
+static const struct iommu_domain_ops arm_smmu_release_ops = {
+       .attach_dev = arm_smmu_attach_dev_release,
+};
+
+static struct iommu_domain arm_smmu_release_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
+       .ops = &arm_smmu_release_ops,
+};
+
 static struct iommu_domain *
 arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
                                   const struct iommu_user_data *user_data)
@@ -3580,14 +3605,6 @@ static void arm_smmu_release_device(struct device *dev)
 {
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
-       WARN_ON(master->iopf_refcount);
-
-       /* Put the STE back to what arm_smmu_init_strtab() sets */
-       if (dev->iommu->require_direct)
-               arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
-       else
-               arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
-
        arm_smmu_disable_pasid(master);
        arm_smmu_remove_master(master);
        if (arm_smmu_cdtab_allocated(&master->cd_table))
@@ -3678,6 +3695,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
 static const struct iommu_ops arm_smmu_ops = {
        .identity_domain        = &arm_smmu_identity_domain,
        .blocked_domain         = &arm_smmu_blocked_domain,
+       .release_domain         = &arm_smmu_release_domain,
        .capable                = arm_smmu_capable,
        .hw_info                = arm_smmu_hw_info,
        .domain_alloc_sva       = arm_smmu_sva_domain_alloc,
-- 
2.43.0


Reply via email to