Block CPR if the kernel does not support VHOST_NEW_OWNER. After CPR, the new QEMU process calls VHOST_NEW_OWNER instead of VHOST_SET_OWNER to take over the reused vdpa device.
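For illustration only, outside the patch proper: a minimal user-space sketch that probes a vhost-vdpa device for the new backend capability, which is the same condition the CPR blocker below keys on. VHOST_GET_BACKEND_FEATURES is the existing ioctl from <linux/vhost.h>; the VHOST_BACKEND_F_NEW_OWNER bit value used here is a placeholder, since the real value is defined by the kernel side of this series.

/* Sketch only; not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

/* Placeholder bit: substitute the value defined by the kernel series. */
#ifndef VHOST_BACKEND_F_NEW_OWNER
#define VHOST_BACKEND_F_NEW_OWNER 0x8
#endif

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/dev/vhost-vdpa-0";
    uint64_t features = 0;
    int fd = open(path, O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0) {
        perror("VHOST_GET_BACKEND_FEATURES");
        close(fd);
        return 1;
    }
    printf("backend features 0x%llx: NEW_OWNER %s\n",
           (unsigned long long)features,
           (features & (1ULL << VHOST_BACKEND_F_NEW_OWNER)) ? "present" : "absent");
    close(fd);
    return 0;
}

Built as, say, check-new-owner (hypothetical name), it can be pointed at /dev/vhost-vdpa-0 or another vdpa chardev to see whether CPR would be blocked for that device.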
Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 hw/virtio/trace-events    |  1 +
 hw/virtio/vhost-vdpa.c    | 24 ++++++++++++++++++++++--
 include/hw/virtio/vhost.h |  1 +
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 3cf84e04a7..990c61be79 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -64,6 +64,7 @@ vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index:
 vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d"
 vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64
 vhost_vdpa_set_owner(void *dev) "dev: %p"
+vhost_vdpa_new_owner(void *dev) "dev: %p"
 vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
 vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64
 vhost_vdpa_set_config_call(void *dev, int fd)"dev: %p fd: %d"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3cdaa12ed5..9e3f414ac2 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -769,6 +769,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     vhost_vdpa_svq_cleanup(dev);
     dev->opaque = NULL;
 
+    migrate_del_blocker(&dev->cpr_blocker);
     return 0;
 }
 
@@ -848,13 +849,13 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
-        0x1ULL << VHOST_BACKEND_F_SUSPEND;
+        0x1ULL << VHOST_BACKEND_F_SUSPEND |
+        0x1ULL << VHOST_BACKEND_F_NEW_OWNER;
     int r;
 
     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
         return -EFAULT;
     }
-
     features &= f;
 
     if (vhost_vdpa_first_dev(dev)) {
@@ -1360,6 +1361,18 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
     }
 
     if (started) {
+        /*
+         * Register a blocker the first time device is started (when we know
+         * its capabilities).
+         */
+        if (!dev->cpr_blocker &&
+            !(dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_NEW_OWNER))) {
+            error_setg(&dev->cpr_blocker, "vhost-vdpa: device does not support "
+                       "VHOST_BACKEND_F_NEW_OWNER");
+            migrate_add_blocker_modes(&dev->cpr_blocker, &error_abort,
+                                      MIG_MODE_CPR_EXEC, -1);
+        }
+
         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
             error_report("SVQ can not work while IOMMU enable, please disable"
                          "IOMMU and try again");
@@ -1518,10 +1531,17 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev,
 
 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
+
     if (!vhost_vdpa_first_dev(dev)) {
         return 0;
     }
 
+    if (v->shared->reused) {
+        trace_vhost_vdpa_new_owner(dev);
+        return vhost_vdpa_call(dev, VHOST_NEW_OWNER, NULL);
+    }
+
     trace_vhost_vdpa_set_owner(dev);
     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
 }
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index d75faf46e9..3f1b802f85 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
     QLIST_HEAD(, vhost_iommu) iommu_list;
     IOMMUNotifier n;
     const VhostDevConfigOps *config_ops;
+    Error *cpr_blocker;
 };
 
 extern const VhostOps kernel_ops;
-- 
2.39.3