From: Leon Romanovsky <leo...@nvidia.com>

In case a peer-to-peer transaction traverses the host bridge, the
IOMMU mapping needs the IOMMU_MMIO flag, together with skipping the
CPU cache sync.

The latter was handled by the provided DMA_ATTR_SKIP_CPU_SYNC flag,
but the IOMMU flag was missed, due to the assumption that such memory
can be treated as regular memory.

Reuse the newly introduced DMA_ATTR_MMIO attribute to properly take
the MMIO path.

Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
Signed-off-by: Leon Romanovsky <leo...@nvidia.com>
---
 mm/hmm.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
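
[Not part of the patch: a minimal sketch, for context, of how a
caller-side helper could pick the DMA attributes for a single entry.
The helper name sketch_map_entry() is made up for illustration;
dma_map_phys() and DMA_ATTR_MMIO are assumed to be available from the
earlier patches in this series, and PCI_P2PDMA_MAP_THRU_HOST_BRIDGE
from <linux/pci-p2pdma.h>.]

#include <linux/dma-mapping.h>	/* dma_map_phys(), DMA_ATTR_MMIO (this series) */
#include <linux/pci-p2pdma.h>	/* enum pci_p2pdma_map_type */

/*
 * Illustrative only: map one entry of the given size, taking the MMIO
 * path when the P2P transaction goes through the host bridge.
 */
static dma_addr_t sketch_map_entry(struct device *dev, phys_addr_t paddr,
				   size_t size, enum pci_p2pdma_map_type type)
{
	unsigned long attrs = 0;

	if (type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		attrs |= DMA_ATTR_MMIO;	/* MMIO IOMMU prot, no CPU cache sync */

	return dma_map_phys(dev, paddr, size, DMA_BIDIRECTIONAL, attrs);
}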

diff --git a/mm/hmm.c b/mm/hmm.c
index 015ab243f081..6556c0e074ba 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -746,7 +746,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
        case PCI_P2PDMA_MAP_NONE:
                break;
        case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
-               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+               attrs |= DMA_ATTR_MMIO;
                pfns[idx] |= HMM_PFN_P2PDMA;
                break;
        case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -776,7 +776,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
                        goto error;
 
                dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
-                                       DMA_BIDIRECTIONAL, 0);
+                                       DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma_addr))
                        goto error;
 
@@ -811,16 +811,17 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
        if ((pfns[idx] & valid_dma) != valid_dma)
                return false;
 
+       if (pfns[idx] & HMM_PFN_P2PDMA)
+               attrs |= DMA_ATTR_MMIO;
+
        if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
                ; /* no need to unmap bus address P2P mappings */
-       else if (dma_use_iova(state)) {
-               if (pfns[idx] & HMM_PFN_P2PDMA)
-                       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+       else if (dma_use_iova(state))
                dma_iova_unlink(dev, state, idx * map->dma_entry_size,
                                map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
-       } else if (dma_need_unmap(dev))
+       else if (dma_need_unmap(dev))
                dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
-                              DMA_BIDIRECTIONAL, 0);
+                              DMA_BIDIRECTIONAL, attrs);
 
        pfns[idx] &=
                ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
-- 
2.50.1
