In nested mode, the legacy vfio_iommu_map_notify() cannot be used: there is
no "caching" mode and we do not trap on map.
On Intel, vfio_iommu_map_notify() was used to DMA map the RAM through the
host single stage.

With nested mode, we need to set up the stage 2 and the stage 1 mappings
separately. This patch introduces a prereg_listener to set up the stage 2
mapping.

The stage 1 mapping, owned by the guest, is passed to the host whenever the
guest invalidates its stage 1 configuration, through a dedicated config
IOMMU notifier.

Guest IOTLB invalidations are cascaded down to the host through another
IOMMU MR UNMAP notifier.

Signed-off-by: Eric Auger <eric.au...@redhat.com>

---

v3 -> v4:
- use iommu_inv_pasid_info for ASID invalidation

v2 -> v3:
- use VFIO_IOMMU_ATTACH_PASID_TABLE
- new user API
- handle leaf

v1 -> v2:
- adapt to uapi changes
- pass the asid
- pass IOMMU_NOTIFIER_S1_CFG when initializing the config notifier
---
 hw/vfio/common.c     | 151 +++++++++++++++++++++++++++++++++++++++----
 hw/vfio/trace-events |   2 +
 2 files changed, 142 insertions(+), 11 deletions(-)
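Note for reviewers: as a companion to the diff below, here is a rough,
self-contained userspace sketch of the ioctl sequence this patch drives
from QEMU in nested mode. It assumes the uapi from the companion kernel
series (VFIO_IOMMU_ATTACH_PASID_TABLE and struct iommu_pasid_table_config
in <linux/iommu.h>) and a container fd already set up with
VFIO_TYPE1_NESTING_IOMMU; the function name and values are illustrative,
not part of the patch.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>   /* VFIO_IOMMU_ATTACH_PASID_TABLE: companion series */
#include <linux/iommu.h>  /* struct iommu_pasid_table_config: companion series */

static int setup_nested_stages(int container_fd, void *ram, size_t ram_size,
                               struct iommu_pasid_table_config *s1_cfg)
{
    /* Stage 2: map guest RAM (GPA -> HPA), which is what the
     * prereg listener does for each RAM section. */
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
        .vaddr = (uintptr_t)ram,
        .iova  = 0,              /* the IOVA is the guest physical address */
        .size  = ram_size,
    };
    struct vfio_iommu_type1_attach_pasid_table attach = {
        .argsz = sizeof(attach),
        .flags = 0,
    };

    if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map)) {
        return -1;
    }

    /* Stage 1: hand the guest-owned PASID table over to the host,
     * which is what vfio_iommu_nested_notify() does on a config event. */
    memcpy(&attach.config, s1_cfg, sizeof(attach.config));
    return ioctl(container_fd, VFIO_IOMMU_ATTACH_PASID_TABLE, &attach);
}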
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 26bc2ab19f..084e3f30e6 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -445,6 +445,71 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
     return true;
 }
 
+/* Pass the guest stage 1 config to the host */
+static void vfio_iommu_nested_notify(IOMMUNotifier *n, IOMMUConfig *cfg)
+{
+    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
+    VFIOContainer *container = giommu->container;
+    struct vfio_iommu_type1_attach_pasid_table info;
+    int ret;
+
+    info.argsz = sizeof(info);
+    info.flags = 0;
+    memcpy(&info.config, &cfg->pasid_cfg, sizeof(cfg->pasid_cfg));
+
+    ret = ioctl(container->fd, VFIO_IOMMU_ATTACH_PASID_TABLE, &info);
+    if (ret) {
+        error_report("%p: failed to pass S1 config to the host (%d)",
+                     container, ret);
+    }
+}
+
+/* Propagate a guest IOTLB invalidation to the host (nested mode) */
+static void vfio_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
+    hwaddr start = iotlb->iova + giommu->iommu_offset;
+
+    VFIOContainer *container = giommu->container;
+    struct vfio_iommu_type1_cache_invalidate ustruct;
+    size_t size = iotlb->addr_mask + 1;
+    int ret;
+
+    assert(iotlb->perm == IOMMU_NONE);
+
+    ustruct.argsz = sizeof(ustruct);
+    ustruct.flags = 0;
+    ustruct.info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
+
+    if (size <= 0x10000) {
+        ustruct.info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;
+        ustruct.info.granularity = IOMMU_INV_GRANU_ADDR;
+        ustruct.info.addr_info.flags = IOMMU_INV_ADDR_FLAGS_ARCHID;
+        if (iotlb->leaf) {
+            ustruct.info.addr_info.flags |= IOMMU_INV_ADDR_FLAGS_LEAF;
+        }
+        ustruct.info.addr_info.archid = iotlb->arch_id;
+        ustruct.info.addr_info.addr = start;
+        ustruct.info.addr_info.granule_size = size;
+        ustruct.info.addr_info.nb_granules = 1;
+        trace_vfio_iommu_addr_inv_iotlb(iotlb->arch_id, start, size, 1,
+                                        iotlb->leaf);
+    } else {
+        ustruct.info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;
+        ustruct.info.granularity = IOMMU_INV_GRANU_PASID;
+        ustruct.info.pasid_info.archid = iotlb->arch_id;
+        ustruct.info.pasid_info.flags = IOMMU_INV_PASID_FLAGS_ARCHID;
+        trace_vfio_iommu_asid_inv_iotlb(iotlb->arch_id);
+    }
+
+    ret = ioctl(container->fd, VFIO_IOMMU_CACHE_INVALIDATE, &ustruct);
+    if (ret) {
+        error_report("%p: failed to invalidate CACHE for 0x%"PRIx64
+                     " mask=0x%"PRIx64" (%d)",
+                     container, start, iotlb->addr_mask, ret);
+    }
+}
+
 static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 {
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
@@ -628,6 +693,32 @@ static void vfio_dma_unmap_ram_section(VFIOContainer *container,
     }
 }
 
+static void vfio_prereg_listener_region_add(MemoryListener *listener,
+                                            MemoryRegionSection *section)
+{
+    VFIOContainer *container =
+        container_of(listener, VFIOContainer, prereg_listener);
+
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
+
+    vfio_dma_map_ram_section(container, section);
+}
+
+static void vfio_prereg_listener_region_del(MemoryListener *listener,
+                                            MemoryRegionSection *section)
+{
+    VFIOContainer *container =
+        container_of(listener, VFIOContainer, prereg_listener);
+
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
+
+    vfio_dma_unmap_ram_section(container, section);
+}
+
 static void vfio_listener_region_add(MemoryListener *listener,
                                      MemoryRegionSection *section)
 {
@@ -739,21 +830,40 @@ static void vfio_listener_region_add(MemoryListener *listener,
         offset = section->offset_within_address_space -
                  section->offset_within_region;
 
-        giommu = vfio_alloc_guest_iommu(container, iommu_mr, offset);
-
         llend = int128_add(int128_make64(section->offset_within_region),
                            section->size);
         llend = int128_sub(llend, int128_one());
         iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                        MEMTXATTRS_UNSPECIFIED);
-        iommu_iotlb_notifier_init(&giommu->n, vfio_iommu_map_notify,
-                                  IOMMU_NOTIFIER_IOTLB_ALL,
-                                  section->offset_within_region,
-                                  int128_get64(llend),
-                                  iommu_idx);
-        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
-        memory_region_register_iommu_notifier(section->mr, &giommu->n);
+
+        if (container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
+            /* Config notifier to propagate guest stage 1 config changes */
+            giommu = vfio_alloc_guest_iommu(container, iommu_mr, offset);
+            iommu_config_notifier_init(&giommu->n, vfio_iommu_nested_notify,
+                                       IOMMU_NOTIFIER_CONFIG_PASID, iommu_idx);
+            QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
+            memory_region_register_iommu_notifier(section->mr, &giommu->n);
+
+            /* IOTLB unmap notifier to propagate guest IOTLB invalidations */
+            giommu = vfio_alloc_guest_iommu(container, iommu_mr, offset);
+            iommu_iotlb_notifier_init(&giommu->n, vfio_iommu_unmap_notify,
+                                      IOMMU_NOTIFIER_IOTLB_UNMAP,
+                                      section->offset_within_region,
+                                      int128_get64(llend),
+                                      iommu_idx);
+            QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
+            memory_region_register_iommu_notifier(section->mr, &giommu->n);
+        } else {
+            /* MAP/UNMAP IOTLB notifier */
+            giommu = vfio_alloc_guest_iommu(container, iommu_mr, offset);
+            iommu_iotlb_notifier_init(&giommu->n, vfio_iommu_map_notify,
+                                      IOMMU_NOTIFIER_IOTLB_ALL,
+                                      section->offset_within_region,
+                                      int128_get64(llend),
+                                      iommu_idx);
+            QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
+            memory_region_register_iommu_notifier(section->mr, &giommu->n);
+        }
         memory_region_iommu_replay(giommu->iommu, &giommu->n);
 
         return;
@@ -850,15 +960,21 @@ static void vfio_listener_region_del(MemoryListener *listener,
     }
 }
 
-static const MemoryListener vfio_memory_listener = {
+static MemoryListener vfio_memory_listener = {
     .region_add = vfio_listener_region_add,
     .region_del = vfio_listener_region_del,
 };
 
+static MemoryListener vfio_memory_prereg_listener = {
+    .region_add = vfio_prereg_listener_region_add,
+    .region_del = vfio_prereg_listener_region_del,
+};
+
 static void vfio_listener_release(VFIOContainer *container)
 {
     memory_listener_unregister(&container->listener);
-    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU ||
+        container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
         memory_listener_unregister(&container->prereg_listener);
     }
 }
@@ -1354,6 +1470,19 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
         }
         vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
         container->pgsizes = info.iova_pgsizes;
+
+        if (container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
+            container->prereg_listener = vfio_memory_prereg_listener;
+            memory_listener_register(&container->prereg_listener,
+                                     &address_space_memory);
+            if (container->error) {
+                memory_listener_unregister(&container->prereg_listener);
+                ret = container->error;
+                error_setg(errp, "RAM memory listener initialization failed "
+                           "for container");
+                goto free_container_exit;
+            }
+        }
         break;
     }
     case VFIO_SPAPR_TCE_v2_IOMMU:
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 410801de6e..9f1868af2d 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -115,6 +115,8 @@ vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
 vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
 vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8"
 vfio_dma_unmap_overflow_workaround(void) ""
+vfio_iommu_addr_inv_iotlb(int asid, uint64_t addr, uint64_t size, uint64_t nb_granules, bool leaf) "nested IOTLB invalidate asid=%d, addr=0x%"PRIx64" granule_size=0x%"PRIx64" nb_granules=0x%"PRIx64" leaf=%d"
+vfio_iommu_asid_inv_iotlb(int asid) "nested IOTLB invalidate asid=%d"
 
 # platform.c
 vfio_platform_base_device_init(char *name, int groupid) "%s belongs to group #%d"
-- 
2.20.1
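P.S.: a worked example of the invalidation encoding in
vfio_iommu_unmap_notify() above, with illustrative values: a guest TLBI
for ASID 5 covering one 4kB leaf entry at IOVA 0x100000. Since the size
(0x1000) is below the 0x10000 threshold, the per-address granularity
path is taken and the fields end up as follows (uapi structs from the
companion kernel series):

/* Illustrative only, not part of the patch */
struct vfio_iommu_type1_cache_invalidate ustruct = {
    .argsz = sizeof(ustruct),
    .flags = 0,
    .info = {
        .version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
        .cache       = IOMMU_CACHE_INV_TYPE_IOTLB,
        .granularity = IOMMU_INV_GRANU_ADDR,
        .addr_info = {
            .flags        = IOMMU_INV_ADDR_FLAGS_ARCHID |
                            IOMMU_INV_ADDR_FLAGS_LEAF,
            .archid       = 5,
            .addr         = 0x100000,
            .granule_size = 0x1000,
            .nb_granules  = 1,
        },
    },
};
/* Anything larger than 0x10000 bytes falls back to a full per-ASID
 * invalidation (IOMMU_INV_GRANU_PASID) instead. */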