On some architectures, TARGET_PAGE_ALIGN() is not enough to get the right alignment. For example, on ARM, TARGET_PAGE_BITS is 10 because some old CPUs support a 1K page size, while the minimum SMMU page size is 4K.
This fixes problems like:

2015-11-17T07:37:42.892265Z qemu-system-aarch64: VFIO_MAP_DMA: -22
2015-11-17T07:37:42.892309Z qemu-system-aarch64: vfio_dma_map(0x223da230, 0x80002f0400, 0x10fc00, 0x7f89b40400) = -22 (Invalid argument)
qemu: hardware error: vfio: DMA mapping failed, unable to continue

Signed-off-by: Pavel Fedin <[email protected]>
---
 hw/vfio/common.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index ff5a89a..328140c 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -326,7 +326,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
                                      MemoryRegionSection *section)
 {
     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
-    hwaddr iova, end;
+    hwaddr iova, end, iommu_page_size;
     Int128 llend;
     void *vaddr;
     int ret;
@@ -346,6 +346,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
     }

     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    iommu_page_size = vfio_container_granularity(container);
+    iova = (iova + iommu_page_size - 1) & ~(iommu_page_size - 1);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
@@ -390,8 +392,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
-        memory_region_iommu_replay(giommu->iommu, &giommu->n,
-                                   vfio_container_granularity(container),
+        memory_region_iommu_replay(giommu->iommu, &giommu->n, iommu_page_size,
                                    false);

         return;
--
1.9.5.msysgit.0
