iommu_flush_dev_iotlb() is called to invalidate caches on a device, but it only loops over the devices that are fully attached to the domain. For sub-devices this is ineffective and leaves stale caching entries on the device. Fix it by also looping over the sub-devices attached to the domain, and keep domain->has_iotlb_device up to date on device/sub-device attach/detach as well as on ATS enabling/disabling.
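For reference, the loops below walk a per-domain list of sub-devices. A minimal sketch of that bookkeeping, with field names taken from the diff below (the struct itself is defined earlier in the series, so treat this as a sketch rather than the actual definition):

struct subdevice_domain_info {
	struct list_head link_phys;	/* entry in dmar_domain::sub_devices */
	struct list_head link_domain;	/* entry in device_domain_info::auxiliary_domains */
	struct device *dev;		/* physical device the sub-device lives on */
	struct dmar_domain *domain;	/* auxiliary domain the sub-device is attached to */
};

Each sub-device is thus reachable both from its physical device (via auxiliary_domains) and from the domain it is attached to (via sub_devices), which is what lets domain_update_iotlb() and iommu_flush_dev_iotlb() below cover it.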
Signed-off-by: Liu Yi L <yi.l....@intel.com>
Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
---
 drivers/iommu/intel/iommu.c | 90 +++++++++++++++++++++++++++++--------
 1 file changed, 72 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 4274b4acc325..d9b6037b72b1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1437,6 +1437,10 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 				(unsigned long long)DMA_TLB_IAIG(val));
 }
 
+/**
+ * For a given bus/devfn, fetch its device_domain_info if it supports
+ * device tlb. Only needs to loop devices attached in normal manner.
+ */
 static struct device_domain_info *
 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 			 u8 bus, u8 devfn)
@@ -1459,6 +1463,18 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 	return NULL;
 }
 
+static bool dev_iotlb_enabled(struct device *dev)
+{
+	struct pci_dev *pdev;
+
+	if (!dev || !dev_is_pci(dev))
+		return false;
+
+	pdev = to_pci_dev(dev);
+
+	return !!pdev->ats_enabled;
+}
+
 static void domain_update_iotlb(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
@@ -1467,21 +1483,37 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 	assert_spin_locked(&device_domain_lock);
 
 	list_for_each_entry(info, &domain->devices, link) {
-		struct pci_dev *pdev;
-
-		if (!info->dev || !dev_is_pci(info->dev))
-			continue;
-
-		pdev = to_pci_dev(info->dev);
-		if (pdev->ats_enabled) {
+		if (dev_iotlb_enabled(info->dev)) {
 			has_iotlb_device = true;
 			break;
 		}
 	}
 
+	if (!has_iotlb_device) {
+		struct subdevice_domain_info *subinfo;
+
+		list_for_each_entry(subinfo, &domain->sub_devices, link_phys) {
+			if (dev_iotlb_enabled(subinfo->dev)) {
+				has_iotlb_device = true;
+				break;
+			}
+		}
+	}
 	domain->has_iotlb_device = has_iotlb_device;
 }
 
+static void dev_update_domain_iotlb(struct device_domain_info *info)
+{
+	struct subdevice_domain_info *subinfo;
+
+	assert_spin_locked(&device_domain_lock);
+
+	domain_update_iotlb(info->domain);
+
+	list_for_each_entry(subinfo, &info->auxiliary_domains, link_domain)
+		domain_update_iotlb(subinfo->domain);
+}
+
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
@@ -1524,7 +1556,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
 	    !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
 		info->ats_enabled = 1;
-		domain_update_iotlb(info->domain);
+		dev_update_domain_iotlb(info);
 		info->ats_qdep = pci_ats_queue_depth(pdev);
 	}
 }
@@ -1543,7 +1575,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 	if (info->ats_enabled) {
 		pci_disable_ats(pdev);
 		info->ats_enabled = 0;
-		domain_update_iotlb(info->domain);
+		dev_update_domain_iotlb(info);
 	}
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	if (info->pri_enabled) {
@@ -1557,26 +1589,43 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 #endif
 }
 
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+				    u64 addr, unsigned mask)
+{
+	u16 sid, qdep;
+
+	if (!info || !info->ats_enabled)
+		return;
+
+	sid = info->bus << 8 | info->devfn;
+	qdep = info->ats_qdep;
+	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+			   qdep, addr, mask);
+}
+
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 				  u64 addr, unsigned mask)
 {
-	u16 sid, qdep;
 	unsigned long flags;
 	struct device_domain_info *info;
+	struct subdevice_domain_info *subinfo;
 
 	if (!domain->has_iotlb_device)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->ats_enabled)
-			continue;
+	list_for_each_entry(info, &domain->devices, link)
+		__iommu_flush_dev_iotlb(info, addr, mask);
+
+	/*
+	 * Besides looping all devices attached normally, also
+	 * needs to loop all devices attached via auxiliary
+	 * manner.
+	 */
+	list_for_each_entry(subinfo, &domain->sub_devices, link_phys)
+		__iommu_flush_dev_iotlb(get_domain_info(subinfo->dev),
+					addr, mask);
 
-		sid = info->bus << 8 | info->devfn;
-		qdep = info->ats_qdep;
-		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-				qdep, addr, mask);
-	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -5208,6 +5257,9 @@ static void auxiliary_link_device(struct dmar_domain *domain,
 	subinfo->dev = dev;
 	list_add(&subinfo->link_domain, &info->auxiliary_domains);
 	list_add(&subinfo->link_phys, &domain->sub_devices);
+	if (dev_iotlb_enabled(dev))
+		domain_update_iotlb(domain);
+
 	_auxiliary_link_device(domain, subinfo, dev);
 	domain->auxd_refcnt++;
 }
@@ -5242,6 +5294,8 @@ static int auxiliary_unlink_device(struct dmar_domain *domain,
 		list_del(&subinfo->link_domain);
 		list_del(&subinfo->link_phys);
 		kfree(subinfo);
+		if (domain->has_iotlb_device)
+			domain_update_iotlb(domain);
 	}
 
 	domain->auxd_refcnt--;
-- 
2.25.1
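Not part of the patch, just a reading aid: the flush path as it reads with this change applied, reassembled from the hunks above (not a verbatim copy of the resulting file):

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	unsigned long flags;
	struct device_domain_info *info;
	struct subdevice_domain_info *subinfo;

	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);

	/* Devices attached to the domain in the normal manner. */
	list_for_each_entry(info, &domain->devices, link)
		__iommu_flush_dev_iotlb(info, addr, mask);

	/* Sub-devices attached via the auxiliary manner. */
	list_for_each_entry(subinfo, &domain->sub_devices, link_phys)
		__iommu_flush_dev_iotlb(get_domain_info(subinfo->dev),
					addr, mask);

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

Since __iommu_flush_dev_iotlb() returns early for entries without ATS enabled, walking both lists unconditionally is safe once domain->has_iotlb_device indicates that at least one attached device or sub-device has a device TLB.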