Considering that we may have multiple IOMMU notifier consumers in the future, convert iommu_ops.notify_{started|stopped} into a more general form. Now we can trap all notifier registrations and deregistrations, rather than only the first registration and the last deregistration.
Power was leveraging the notifier_{started|stopped}, adding iommu_user field for counting on Power guests to achieve the same goal. Suggested-by: Paolo Bonzini <pbonz...@redhat.com> Signed-off-by: Peter Xu <pet...@redhat.com> --- hw/i386/intel_iommu.c | 4 ++-- hw/ppc/spapr_iommu.c | 18 ++++++++++++------ include/exec/memory.h | 8 ++++---- include/hw/ppc/spapr.h | 1 + memory.c | 10 ++++------ 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 28c31a2..c6bd8f6 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1974,7 +1974,7 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr, return ret; } -static void vtd_iommu_notify_started(MemoryRegion *iommu) +static void vtd_iommu_notifier_add(MemoryRegion *iommu, IOMMUNotifier *n) { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); @@ -2348,7 +2348,7 @@ static void vtd_init(IntelIOMMUState *s) memset(s->womask, 0, DMAR_REG_SIZE); s->iommu_ops.translate = vtd_iommu_translate; - s->iommu_ops.notify_started = vtd_iommu_notify_started; + s->iommu_ops.notifier_add = vtd_iommu_notifier_add; s->root = 0; s->root_extended = false; s->dmar_enabled = false; diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 6bc4d4d..99c83a4 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -156,14 +156,20 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu) return 1ULL << tcet->page_shift; } -static void spapr_tce_notify_started(MemoryRegion *iommu) +static void spapr_tce_notifier_add(MemoryRegion *iommu, IOMMUNotifier *n) { - spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true); + sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu); + if (tcet->iommu_users++ == 0) { + spapr_tce_set_need_vfio(tcet, true); + } } -static void spapr_tce_notify_stopped(MemoryRegion *iommu) +static void spapr_tce_notifier_del(MemoryRegion *iommu, IOMMUNotifier *n) { - 
spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false); + sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu); + if (--tcet->iommu_users == 0) { + spapr_tce_set_need_vfio(tcet, false); + } } static int spapr_tce_table_post_load(void *opaque, int version_id) @@ -246,8 +252,8 @@ static const VMStateDescription vmstate_spapr_tce_table = { static MemoryRegionIOMMUOps spapr_iommu_ops = { .translate = spapr_tce_translate_iommu, .get_min_page_size = spapr_tce_get_min_page_size, - .notify_started = spapr_tce_notify_started, - .notify_stopped = spapr_tce_notify_stopped, + .notifier_add = spapr_tce_notifier_add, + .notifier_del = spapr_tce_notifier_del, }; static int spapr_tce_table_realize(DeviceState *dev) diff --git a/include/exec/memory.h b/include/exec/memory.h index 92f14db..9efcf1b 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -175,10 +175,10 @@ struct MemoryRegionIOMMUOps { IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write); /* Returns minimum supported page size */ uint64_t (*get_min_page_size)(MemoryRegion *iommu); - /* Called when the first notifier is set */ - void (*notify_started)(MemoryRegion *iommu); - /* Called when the last notifier is removed */ - void (*notify_stopped)(MemoryRegion *iommu); + /* Called when someone registers to the notify list */ + void (*notifier_add)(MemoryRegion *iommu, IOMMUNotifier *n); + /* Called when someone unregisters from the notify list */ + void (*notifier_del)(MemoryRegion *iommu, IOMMUNotifier *n); }; typedef struct CoalescedMemoryRange CoalescedMemoryRange; diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index caf7be9..08627ec 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -564,6 +564,7 @@ struct sPAPRTCETable { MemoryRegion root, iommu; struct VIOsPAPRDevice *vdev; /* for @bypass migration compatibility only */ QLIST_ENTRY(sPAPRTCETable) list; + int iommu_users; }; sPAPRTCETable 
*spapr_tce_find_by_liobn(target_ulong liobn); diff --git a/memory.c b/memory.c index 45a3902..d913043 100644 --- a/memory.c +++ b/memory.c @@ -1518,9 +1518,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, { /* We need to register for at least one bitfield */ assert(n->notifier_caps != IOMMU_NOTIFIER_NONE); - if (mr->iommu_ops->notify_started && - QLIST_EMPTY(&mr->iommu_notify)) { - mr->iommu_ops->notify_started(mr); + if (mr->iommu_ops->notifier_add) { + mr->iommu_ops->notifier_add(mr, n); } QLIST_INSERT_HEAD(&mr->iommu_notify, n, node); } @@ -1560,9 +1559,8 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr, IOMMUNotifier *n) { QLIST_REMOVE(n, node); - if (mr->iommu_ops->notify_stopped && - QLIST_EMPTY(&mr->iommu_notify)) { - mr->iommu_ops->notify_stopped(mr); + if (mr->iommu_ops->notifier_del) { + mr->iommu_ops->notifier_del(mr, n); } } -- 2.7.4