From: "Aviv Ben-David" <bd.a...@gmail.com> The default implementation scans the address space and try to find translation for each page, if exists.
This callback enables an efficient implementation for intel_iommu and
other subsystems with a large address space.

Signed-off-by: Aviv Ben-David <bd.a...@gmail.com>
---
 include/exec/memory.h | 4 ++++
 memory.c              | 8 ++++++++
 2 files changed, 12 insertions(+)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index 2d7ee54..a8b3701 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -180,6 +180,10 @@ struct MemoryRegionIOMMUOps {
     void (*notify_flag_changed)(MemoryRegion *iommu,
                                 IOMMUNotifierFlag old_flags,
                                 IOMMUNotifierFlag new_flags);
+    /* Called when a region has been moved between IOMMU domains */
+    void (*replay)(MemoryRegion *mr,
+                   IOMMUNotifier *n,
+                   bool is_write);
 };
 
 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
diff --git a/memory.c b/memory.c
index 9b88638..562d540 100644
--- a/memory.c
+++ b/memory.c
@@ -1624,6 +1624,14 @@ void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
     hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
 
+    /* If there is a specific implementation, use it */
+    assert(memory_region_is_iommu(mr));
+    if (mr->iommu_ops && mr->iommu_ops->replay) {
+        return mr->iommu_ops->replay(mr, n, is_write);
+    }
+
+    /* No specific implementation for this IOMMU, fall back to the
+     * default implementation */
     granularity = memory_region_iommu_get_min_page_size(mr);
 
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
-- 
1.9.1
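
For context, below is a minimal sketch (not part of the patch) of how an
IOMMU model such as intel_iommu might wire up the new replay callback.
The vtd_* names, the single hard-coded entry, and the elided page-table
walk are illustrative assumptions only; a real implementation would walk
the device's own page tables and fire the notifier once per present
mapping.

/* Sketch only (not part of this patch): how an IOMMU model could opt
 * into the new hook.  The vtd_* names and the page-table walk are
 * placeholders for the device's own code. */

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* Replay only the mappings the IOMMU actually holds, instead of
 * probing every page of the region through translate(). */
static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                             bool is_write)
{
    /* For each mapping found by the device's page-table walker
     * (elided here), fill an IOMMUTLBEntry and fire the notifier. */
    IOMMUTLBEntry entry = {
        .target_as       = &address_space_memory,
        .iova            = 0,            /* guest IOVA of the mapping */
        .translated_addr = 0,            /* address it maps to */
        .addr_mask       = 4096 - 1,     /* one 4K page */
        .perm            = is_write ? IOMMU_RW : IOMMU_RO,
    };

    n->notify(n, &entry);
}

static const MemoryRegionIOMMUOps vtd_iommu_ops = {
    /* .translate, .get_min_page_size, .notify_flag_changed as before */
    .replay = vtd_iommu_replay,
};

The point of the callback is that the vendor model only visits the
mappings it knows about, rather than iterating every
granularity-sized page of the region as the default loop in
memory_region_iommu_replay() does.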