In new QEMU, do not register the memory listener at device creation time. Register it later, in the container post_load handler, after all vmstate that may affect regions and mapping boundaries has been loaded. The post_load registration will cause the listener to invoke its callback on each flat section, and the resulting dma_map calls will match the mappings remembered by the kernel.
The listener calls a special dma_map handler that passes the new VA of each section to the kernel using VFIO_DMA_MAP_FLAG_VADDR. Restore the normal handler at the end.

Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 include/hw/vfio/vfio-cpr.h |  3 +++
 hw/vfio/container.c        | 15 ++++++++++--
 hw/vfio/cpr-legacy.c       | 57 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+), 2 deletions(-)

diff --git a/include/hw/vfio/vfio-cpr.h b/include/hw/vfio/vfio-cpr.h
index 5a2e5f6..0462447 100644
--- a/include/hw/vfio/vfio-cpr.h
+++ b/include/hw/vfio/vfio-cpr.h
@@ -17,6 +17,9 @@ struct VFIOGroup;
 
 typedef struct VFIOContainerCPR {
     Error *blocker;
+    int (*saved_dma_map)(const struct VFIOContainerBase *bcontainer,
+                         hwaddr iova, ram_addr_t size,
+                         void *vaddr, bool readonly, MemoryRegion *mr);
 } VFIOContainerCPR;
 
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 798abda..f91f2d5 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -137,6 +137,8 @@ static int vfio_legacy_dma_unmap_one(const VFIOContainerBase *bcontainer,
     int ret;
     Error *local_err = NULL;
 
+    g_assert(!cpr_is_incoming());
+
     if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
         if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
             bcontainer->dirty_pages_supported) {
@@ -691,8 +693,17 @@ static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as,
     }
     group_was_added = true;
 
-    if (!vfio_listener_register(bcontainer, errp)) {
-        goto fail;
+    /*
+     * If CPR, register the listener later, after all state that may
+     * affect regions and mapping boundaries has been cpr load'ed.  Later,
+     * the listener will invoke its callback on each flat section and call
+     * dma_map to supply the new vaddr, and the calls will match the mappings
+     * remembered by the kernel.
+     */
+    if (!cpr_is_incoming()) {
+        if (!vfio_listener_register(bcontainer, errp)) {
+            goto fail;
+        }
     }
 
     bcontainer->initialized = true;
diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c
index cf80332..512ef41 100644
--- a/hw/vfio/cpr-legacy.c
+++ b/hw/vfio/cpr-legacy.c
@@ -10,11 +10,13 @@
 #include "hw/vfio/vfio-container.h"
 #include "hw/vfio/vfio-cpr.h"
 #include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-listener.h"
 #include "migration/blocker.h"
 #include "migration/cpr.h"
 #include "migration/migration.h"
 #include "migration/vmstate.h"
 #include "qapi/error.h"
+#include "qemu/error-report.h"
 
 static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
 {
@@ -31,6 +33,32 @@ static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
     return true;
 }
 
+/*
+ * Set the new @vaddr for any mappings registered during cpr load.
+ * The incoming state is cleared thereafter.
+ */
+static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer,
+                                   hwaddr iova, ram_addr_t size, void *vaddr,
+                                   bool readonly, MemoryRegion *mr)
+{
+    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                                  bcontainer);
+    struct vfio_iommu_type1_dma_map map = {
+        .argsz = sizeof(map),
+        .flags = VFIO_DMA_MAP_FLAG_VADDR,
+        .vaddr = (__u64)(uintptr_t)vaddr,
+        .iova = iova,
+        .size = size,
+    };
+
+    g_assert(cpr_is_incoming());
+
+    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
+        return -errno;
+    }
+
+    return 0;
+}
+
 static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
 {
@@ -59,11 +87,34 @@ static int vfio_container_pre_save(void *opaque)
     return 0;
 }
 
+static int vfio_container_post_load(void *opaque, int version_id)
+{
+    VFIOContainer *container = opaque;
+    VFIOContainerBase *bcontainer = &container->bcontainer;
+    VFIOGroup *group;
+    Error *local_err = NULL;
+
+    if (!vfio_listener_register(bcontainer, &local_err)) {
+        error_report_err(local_err);
+        return -1;
+    }
+
+    QLIST_FOREACH(group, &container->group_list, container_next) {
+        VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+
+        /* Restore original dma_map function */
+        vioc->dma_map = container->cpr.saved_dma_map;
+    }
+    return 0;
+}
+
 static const VMStateDescription vfio_container_vmstate = {
     .name = "vfio-container",
     .version_id = 0,
     .minimum_version_id = 0,
+    .priority = MIG_PRI_LOW,  /* Must happen after devices and groups */
     .pre_save = vfio_container_pre_save,
+    .post_load = vfio_container_post_load,
     .needed = cpr_incoming_needed,
     .fields = (VMStateField[]) {
         VMSTATE_END_OF_LIST()
@@ -86,6 +137,12 @@ bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp)
 
     vmstate_register(NULL, -1, &vfio_container_vmstate, container);
 
+    /* During incoming CPR, divert calls to dma_map. */
+    if (cpr_is_incoming()) {
+        VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+        container->cpr.saved_dma_map = vioc->dma_map;
+        vioc->dma_map = vfio_legacy_cpr_dma_map;
+    }
     return true;
 }
-- 
1.8.3.1
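For readers new to the VADDR update protocol: below is a minimal userspace sketch of the two halves this handler relies on, assuming a legacy type1 container fd. Old QEMU clears the vaddr of every mapping while the kernel keeps the iova/size/pinning (roughly what vfio_dma_unmap_vaddr_all does on the outgoing side), and new QEMU re-supplies the vaddr per remembered mapping, as vfio_legacy_cpr_dma_map does in this patch. The helper names and the container_fd/iova/size/new_vaddr parameters are illustrative placeholders, not part of the patch; the flags, structs, and ioctls are from linux/vfio.h.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Old QEMU: clear the vaddr of all mappings; the kernel keeps iova/size. */
static int cpr_unmap_all_vaddr(int container_fd)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
        .iova = 0,
        .size = 0,
    };

    return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) ? -errno : 0;
}

/*
 * New QEMU: re-supply the vaddr for one mapping remembered by the kernel.
 * iova and size must exactly match the original mapping.
 */
static int cpr_remap_vaddr(int container_fd, uint64_t iova, uint64_t size,
                           void *new_vaddr)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR,
        .vaddr = (uint64_t)(uintptr_t)new_vaddr,
        .iova = iova,
        .size = size,
    };

    return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) ? -errno : 0;
}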