After switching to per-device dma ops, the DMA API core only calls
the Intel IOMMU callbacks for devices that actually need mapping,
hence all checks of iommu_need_mapping() are unnecessary now. Remove
it together with identity_mapping(), which has no remaining users.

Cc: Ashok Raj <ashok....@intel.com>
Cc: Jacob Pan <jacob.jun....@linux.intel.com>
Cc: Kevin Tian <kevin.t...@intel.com>
Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
---
 drivers/iommu/intel-iommu.c | 50 ++++---------------------------------
 1 file changed, 5 insertions(+), 45 deletions(-)
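
[ Reviewer note, not part of the commit: the reasoning above leans on
the per-device dma ops dispatch in the DMA API core. A simplified
sketch of that dispatch follows; it is abridged from the generic code
of this era (include/linux/dma-mapping.h) rather than quoted verbatim,
with debug and sync details trimmed. The point is that the core
already routes identity-mapped devices to dma-direct, so
intel_map_page() and friends only ever see devices that need IOMMU
mapping. ]

/*
 * Simplified sketch, not verbatim kernel code: how the DMA API core
 * dispatches once per-device dma ops are in place.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* Devices that need IOMMU mapping have dev->dma_ops set
	 * (here, to intel_dma_ops). */
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* Identity-mapped devices have no per-device ops and take
	 * the dma-direct path. */
	if (dma_is_direct(ops))
		return dma_direct_map_page(dev, page, offset, size,
					   dir, attrs);

	/* Everything else goes to the driver callback, e.g.
	 * intel_map_page(). */
	return ops->map_page(dev, page, offset, size, dir, attrs);
}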

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 609b539b93f6..f7399d98f404 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2761,17 +2761,6 @@ static int __init si_domain_init(int hw)
        return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-       struct device_domain_info *info;
-
-       info = dev->archdata.iommu;
-       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
-               return (info->domain == si_domain);
-
-       return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
        struct dmar_domain *ndomain;
@@ -3428,15 +3417,6 @@ static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
        return domain;
 }
 
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
-       if (iommu_dummy(dev))
-               return false;
-
-       return !identity_mapping(dev);
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
 {
@@ -3498,20 +3478,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               return __intel_map_single(dev, page_to_phys(page) + offset,
-                               size, dir, *dev->dma_mask);
-       return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+       return __intel_map_single(dev, page_to_phys(page) + offset,
+                                 size, dir, *dev->dma_mask);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               return __intel_map_single(dev, phys_addr, size, dir,
-                               *dev->dma_mask);
-       return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+       return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3563,17 +3538,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               intel_unmap(dev, dev_addr, size);
-       else
-               dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+       intel_unmap(dev, dev_addr, size);
 }
 
 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               intel_unmap(dev, dev_addr, size);
+       intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3583,9 +3554,6 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
        struct page *page = NULL;
        int order;
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
@@ -3619,9 +3587,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
        int order;
        struct page *page = virt_to_page(vaddr);
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
@@ -3639,9 +3604,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct scatterlist *sg;
        int i;
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
        for_each_sg(sglist, sg, nelems, i) {
                nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
        }
@@ -3663,8 +3625,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
        struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
-       if (!iommu_need_mapping(dev))
-               return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
 
        domain = deferred_attach_domain(dev);
        if (!domain)
-- 
2.17.1
