Pass a parameter to the DMA mapping functions that indicates whether the
memory backs rings or buffers for SVQs (shadow virtqueues). No functional
change.
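For illustration only, not part of the patch: a minimal sketch of how call
sites are expected to use the new flag, given the signatures in the diff
below. The wrapper names here are hypothetical.

/*
 * Sketch only: memory that QEMU itself allocates for shadow virtqueues
 * (SVQ vrings, CVQ buffers) is mapped with shadow = true; guest memory
 * mapped on the guest's behalf (memory listener, IOMMU notifier) passes
 * shadow = false.
 */
#include "qemu/osdep.h"
#include "hw/virtio/vhost-vdpa.h"

static int map_svq_memory(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                          hwaddr size, void *vaddr, bool readonly)
{
    /* QEMU-allocated SVQ memory: shadow = true */
    return vhost_vdpa_dma_map(s, asid, iova, size, vaddr, readonly, true);
}

static int map_guest_memory(VhostVDPAShared *s, hwaddr iova, hwaddr size,
                            void *vaddr, bool readonly)
{
    /* Guest memory in the guest physical address space: shadow = false */
    return vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, size,
                              vaddr, readonly, false);
}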
Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 hw/virtio/vhost-vdpa.c         | 19 ++++++++++---------
 include/hw/virtio/vhost-vdpa.h |  4 ++--
 net/vhost-vdpa.c               |  5 +++--
 3 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 9e3f414ac2..d9ebc396b7 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -92,7 +92,7 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
-                       hwaddr size, void *vaddr, bool readonly)
+                       hwaddr size, void *vaddr, bool readonly, bool shadow)
 {
     struct vhost_msg_v2 msg = {};
     int fd = s->device_fd;
@@ -124,7 +124,7 @@ int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
-                         hwaddr size)
+                         hwaddr size, bool shadow)
 {
     struct vhost_msg_v2 msg = {};
     int fd = s->device_fd;
@@ -234,7 +234,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
             return;
         }
         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
-                                 iotlb->addr_mask + 1, vaddr, read_only);
+                                 iotlb->addr_mask + 1, vaddr, read_only, false);
         if (ret) {
             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
@@ -242,7 +242,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
         }
     } else {
         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
-                                   iotlb->addr_mask + 1);
+                                   iotlb->addr_mask + 1, false);
         if (ret) {
             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                          "0x%" HWADDR_PRIx ") = %d (%m)",
@@ -376,7 +376,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
 
     vhost_vdpa_iotlb_batch_begin_once(s);
     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
-                             int128_get64(llsize), vaddr, section->readonly);
+                             int128_get64(llsize), vaddr, section->readonly,
+                             false);
     if (ret) {
         error_report("vhost vdpa map fail!");
         goto fail_map;
@@ -463,7 +464,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
     if (int128_eq(llsize, int128_2_64())) {
         llsize = int128_rshift(llsize, 1);
         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
-                                   int128_get64(llsize));
+                                   int128_get64(llsize), false);
 
         if (ret) {
             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
@@ -473,7 +474,7 @@
         iova += int128_get64(llsize);
     }
     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
-                               int128_get64(llsize));
+                               int128_get64(llsize), false);
 
     if (ret) {
         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
@@ -1116,7 +1117,7 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
 
     size = ROUND_UP(result->size, qemu_real_host_page_size());
     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
-                             size);
+                             size, true);
     if (unlikely(r < 0)) {
         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
         return;
@@ -1159,7 +1160,7 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
                            needle->size + 1,
                            (void *)(uintptr_t)needle->translated_addr,
-                           needle->perm == IOMMU_RO);
+                           needle->perm == IOMMU_RO, true);
     if (unlikely(r != 0)) {
         error_setg_errno(errp, -r, "Cannot map region to device");
         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 427458cfed..aac6ad439c 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -82,9 +82,9 @@ int vhost_vdpa_get_iova_range(int fd,
                               struct vhost_vdpa_iova_range *iova_range);
 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
-                       hwaddr size, void *vaddr, bool readonly);
+                       hwaddr size, void *vaddr, bool readonly, bool shadow);
 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
-                         hwaddr size);
+                         hwaddr size, bool shadow);
 
 typedef struct vdpa_iommu {
     VhostVDPAShared *dev_shared;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e6010e8900..e3e861cfcc 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -499,7 +499,7 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
     }
 
     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
-                             map->size + 1);
+                             map->size + 1, true);
     if (unlikely(r != 0)) {
         error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
     }
@@ -524,7 +524,8 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
     }
 
     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
-                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
+                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write,
+                           true);
     if (unlikely(r < 0)) {
         goto dma_map_err;
     }
-- 
2.39.3