nommu configs can trivially map coherent allocations to user space, as no actual page table setup is required and the kernel and user space programs share the same address space.
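
For reference (not part of this patch), the !MMU flavor of remap_pfn_range() in mm/nommu.c is roughly the no-op below, paraphrased here only to illustrate why no page table setup is needed:

/*
 * Illustrative paraphrase of the !MMU remap_pfn_range() in mm/nommu.c.
 * Without an MMU there are no page tables to populate, so the helper
 * just validates the request and flags the VMA.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	/* user space simply sees the kernel's own mapping of the buffer */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}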
Fixes: 62fcee9a3bd7 ("dma-mapping: remove CONFIG_ARCH_NO_COHERENT_DMA_MMAP")
Signed-off-by: Christoph Hellwig <h...@lst.de>
Reported-by: dillon min <dillon.min...@gmail.com>
Reviewed-by: Vladimir Murzin <vladimir.mur...@arm.com>
Tested-by: dillon min <dillon.min...@gmail.com>
---
 kernel/dma/Kconfig  |  1 +
 kernel/dma/direct.c | 14 --------------
 2 files changed, 1 insertion(+), 14 deletions(-)

diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index d006668c0027d2..e0dae570a51530 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -71,6 +71,7 @@ config SWIOTLB
 #  in the pagetables
 #
 config DMA_NONCOHERENT_MMAP
+	default y if !MMU
 	bool
 
 config DMA_REMAP
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0a4881e59aa7d6..9ec6a5c3fc578c 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -459,7 +459,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
 	return dev_is_dma_coherent(dev) ||
@@ -485,19 +484,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-	return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
-- 
2.26.2