This adds the APIs for DMA sync operations on bounce buffers. Cc: Ashok Raj <ashok....@intel.com> Cc: Jacob Pan <jacob.jun....@linux.intel.com> Signed-off-by: Lu Baolu <baolu...@linux.intel.com> Tested-by: Xu Pengfei <pengfei...@intel.com> Tested-by: Mika Westerberg <mika.westerb...@intel.com> --- drivers/iommu/intel-pgtable.c | 44 +++++++++++++++++++++++++++++++++++ include/linux/intel-iommu.h | 4 ++++ 2 files changed, 48 insertions(+)
diff --git a/drivers/iommu/intel-pgtable.c b/drivers/iommu/intel-pgtable.c index 1e56ea07f755..fbe1d0e073fd 100644 --- a/drivers/iommu/intel-pgtable.c +++ b/drivers/iommu/intel-pgtable.c @@ -242,6 +242,35 @@ bounce_sync(phys_addr_t orig_addr, phys_addr_t bounce_addr, return 0; } +static int +bounce_sync_single(struct device *dev, struct dmar_domain *domain, + dma_addr_t addr, phys_addr_t paddr, size_t size, + enum dma_data_direction dir, unsigned long attrs, + void *data) +{ + enum dma_sync_target *target = data; + struct bounce_cookie *cookie; + unsigned long flags; + + spin_lock_irqsave(&bounce_lock, flags); + cookie = idr_find(&domain->bounce_idr, addr >> PAGE_SHIFT); + spin_unlock_irqrestore(&bounce_lock, flags); + if (!cookie) + return 0; + + if (*target == SYNC_FOR_DEVICE) { + if (dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE) + bounce_sync(cookie->original_phys, cookie->bounce_phys, + size, DMA_TO_DEVICE); + } else { + if (dir == DMA_BIDIRECTIONAL || dir == DMA_FROM_DEVICE) + bounce_sync(cookie->original_phys, cookie->bounce_phys, + size, DMA_FROM_DEVICE); + } + + return 0; +} + static int bounce_map(struct device *dev, struct dmar_domain *domain, dma_addr_t addr, phys_addr_t paddr, size_t size, enum dma_data_direction dir, unsigned long attrs, @@ -315,6 +344,11 @@ static const struct addr_walk walk_bounce_unmap = { .high = bounce_unmap, }; +static const struct addr_walk walk_bounce_sync_single = { + .low = bounce_sync_single, + .high = bounce_sync_single, +}; + static int domain_walk_addr_range(const struct addr_walk *walk, struct device *dev, struct dmar_domain *domain, dma_addr_t addr, @@ -401,3 +435,13 @@ int domain_bounce_unmap(struct device *dev, dma_addr_t addr, return domain_walk_addr_range(&walk_bounce_unmap, dev, domain, addr, 0, size, dir, attrs, data); } + +int domain_bounce_sync_single(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + void *data) +{ + struct dmar_domain *domain = get_valid_domain_for_dev(dev); 
+ + return domain_walk_addr_range(&walk_bounce_sync_single, dev, + domain, addr, 0, size, dir, 0, data); +} diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 8fd1768f8729..58f192c1f26f 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -31,6 +31,7 @@ #include <linux/iommu.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmar.h> +#include <linux/swiotlb.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -695,6 +696,9 @@ int domain_bounce_map(struct device *dev, dma_addr_t addr, phys_addr_t paddr, int domain_bounce_unmap(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs, void *data); +int domain_bounce_sync_single(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + void *data); #ifdef CONFIG_INTEL_IOMMU_SVM int intel_svm_init(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); -- 2.17.1 _______________________________________________ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu