In the container pre_save handler, discard the virtual addresses in DMA mappings with VFIO_DMA_UNMAP_FLAG_VADDR, because guest RAM will be remapped at a different VA in new QEMU. DMA to already-mapped pages continues.
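For reviewer context only, not part of this patch: a minimal stand-alone sketch of the counterpart step performed in new QEMU, which re-registers the new virtual address of an already-mapped IOVA range via VFIO_IOMMU_MAP_DMA with VFIO_DMA_MAP_FLAG_VADDR. The helper name remap_vaddr and its parameters are illustrative placeholders; in QEMU the real work happens when the memory listener replays guest RAM in the new process.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/*
 * Illustrative sketch: restore the host virtual address for an IOVA range
 * whose vaddr was discarded with VFIO_DMA_UNMAP_FLAG_VADDR.  The IOMMU
 * mapping itself was never torn down, so DMA continues throughout; only
 * the vaddr association is re-established here.
 */
static int remap_vaddr(int container_fd, uint64_t iova, uint64_t size,
                       void *new_vaddr)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE |
                 VFIO_DMA_MAP_FLAG_VADDR,
        .vaddr = (uintptr_t)new_vaddr,
        .iova  = iova,
        .size  = size,
    };

    if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map)) {
        return -errno;
    }
    return 0;
}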
Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 hw/vfio/cpr-legacy.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c
index 638a8e0..519d772 100644
--- a/hw/vfio/cpr-legacy.c
+++ b/hw/vfio/cpr-legacy.c
@@ -17,6 +17,22 @@
 #include "migration/vmstate.h"
 #include "qapi/error.h"
 
+static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
+{
+    struct vfio_iommu_type1_dma_unmap unmap = {
+        .argsz = sizeof(unmap),
+        .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
+        .iova = 0,
+        .size = 0,
+    };
+    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+        error_setg_errno(errp, errno, "vfio_dma_unmap_vaddr_all");
+        return false;
+    }
+    return true;
+}
+
+
 static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
 {
     if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
@@ -32,6 +48,18 @@ static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
     }
 }
 
+static int vfio_container_pre_save(void *opaque)
+{
+    VFIOContainer *container = opaque;
+    Error *err = NULL;
+
+    if (!vfio_dma_unmap_vaddr_all(container, &err)) {
+        error_report_err(err);
+        return -1;
+    }
+    return 0;
+}
+
 static int vfio_container_post_load(void *opaque, int version_id)
 {
     VFIOContainer *container = opaque;
@@ -52,6 +80,7 @@ static const VMStateDescription vfio_container_vmstate = {
     .name = "vfio-container",
     .version_id = 0,
     .minimum_version_id = 0,
+    .pre_save = vfio_container_pre_save,
     .post_load = vfio_container_post_load,
     .needed = cpr_needed_for_reuse,
     .fields = (VMStateField[]) {
-- 
1.8.3.1