The event queue allows the device to report asynchronous errors to the driver. The translate function now injects faults when relevant.
Signed-off-by: Eric Auger <eric.au...@redhat.com>
---
 hw/virtio/trace-events   |  1 +
 hw/virtio/virtio-iommu.c | 67 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 19824c3e91..053a07b3fc 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -71,3 +71,4 @@ virtio_iommu_unmap_inc_interval(uint64_t low, uint64_t high) "Unmap inc [0x%"PRI
 virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
 virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end, uint32_t flags, size_t filled) "dev= %d, subtype=%d start=0x%"PRIx64" end=0x%"PRIx64" flags=%d filled=0x%lx"
 virtio_iommu_fill_none_property(uint32_t devid) "devid=%d"
+virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 324518c300..1246dd6bdf 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -683,17 +683,63 @@ push:
     }
 }
 
+static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
+                                      uint32_t flags, uint32_t endpoint,
+                                      uint64_t address)
+{
+    VirtIODevice *vdev = &viommu->parent_obj;
+    VirtQueue *vq = viommu->event_vq;
+    struct virtio_iommu_fault fault;
+    VirtQueueElement *elem;
+    size_t sz;
+
+    memset(&fault, 0, sizeof(fault));
+    fault.reason = reason;
+    fault.flags = flags;
+    fault.endpoint = endpoint;
+    fault.address = address;
+
+    for (;;) {
+        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+
+        if (!elem) {
+            virtio_error(vdev,
+                         "no buffer available in event queue to report event");
+            return;
+        }
+
+        if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
+            virtio_error(vdev, "error buffer of wrong size");
+            virtqueue_detach_element(vq, elem, 0);
+            g_free(elem);
+            continue;
+        }
+        break;
+    }
+    /* we have a buffer to fill in */
+    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
+                      &fault, sizeof(fault));
+    assert(sz == sizeof(fault));
+
+    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
+    virtqueue_push(vq, elem, sz);
+    virtio_notify(vdev, vq);
+    g_free(elem);
+
+}
+
 static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                             IOMMUAccessFlags flag,
                                             int iommu_idx)
 {
     IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
     VirtIOIOMMU *s = sdev->viommu;
-    uint32_t sid;
+    uint32_t sid, flags;
     viommu_endpoint *ep;
     viommu_mapping *mapping;
     viommu_interval interval;
     bool bypass_allowed;
+    bool read_fault, write_fault;
 
     interval.low = addr;
     interval.high = addr + 1;
@@ -718,6 +764,8 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     if (!ep) {
         if (!bypass_allowed) {
             error_report("%s sid=%d is not known!!", __func__, sid);
+            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
+                                      0, sid, 0);
         } else {
             entry.perm = flag;
         }
@@ -729,6 +777,8 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
             qemu_log_mask(LOG_GUEST_ERROR,
                           "%s %02x:%02x.%01x not attached to any domain\n",
                           __func__, PCI_BUS_NUM(sid), PCI_SLOT(sid), PCI_FUNC(sid));
+            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
+                                      0, sid, 0);
         } else {
             entry.perm = flag;
         }
@@ -740,14 +790,25 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s no mapping for 0x%"PRIx64" for sid=%d\n",
                       __func__, addr, sid);
+        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
+                                  0, sid, addr);
         goto unlock;
     }
 
-    if (((flag & IOMMU_RO) && !(mapping->flags & VIRTIO_IOMMU_MAP_F_READ)) ||
-        ((flag & IOMMU_WO) && !(mapping->flags & VIRTIO_IOMMU_MAP_F_WRITE))) {
+    read_fault = (flag & IOMMU_RO) &&
+                 !(mapping->flags & VIRTIO_IOMMU_MAP_F_READ);
+    write_fault = (flag & IOMMU_WO) &&
+                  !(mapping->flags & VIRTIO_IOMMU_MAP_F_WRITE);
+
+    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
+    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
+    if (flags) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "Permission error on 0x%"PRIx64"(%d): allowed=%d\n",
                       addr, flag, mapping->flags);
+        flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
+        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
+                                  flags, sid, addr);
         goto unlock;
     }
     entry.translated_addr = addr - mapping->virt_addr + mapping->phys_addr;
-- 
2.17.2
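
Note (illustration only, not part of the patch): the sketch below shows how a guest driver might interpret one of these fault records once it has been copied out of an event buffer. The struct and constants are local stand-ins named after the fields and macros used above; their numeric values are assumptions based on my reading of the virtio-iommu spec, and a real driver would use the spec/header definitions and handle the little-endian encoding of flags, endpoint and address.

/*
 * Hypothetical guest-side sketch: decoding a fault record received on the
 * virtio-iommu event virtqueue.  The struct only mirrors the fields the
 * device fills in above (reason, flags, endpoint, address) and omits the
 * reserved/padding fields and the endianness conversion a real driver
 * needs; take authoritative definitions from linux/virtio_iommu.h.
 */
#include <inttypes.h>
#include <stdio.h>

/* Assumed numeric values; verify against the spec header before use. */
#define VIRTIO_IOMMU_FAULT_R_UNKNOWN   0
#define VIRTIO_IOMMU_FAULT_R_DOMAIN    1
#define VIRTIO_IOMMU_FAULT_R_MAPPING   2

#define VIRTIO_IOMMU_FAULT_F_READ      (1u << 0)
#define VIRTIO_IOMMU_FAULT_F_WRITE     (1u << 1)
#define VIRTIO_IOMMU_FAULT_F_ADDRESS   (1u << 8)

struct fault_record {                  /* simplified, illustration only */
    uint8_t  reason;
    uint32_t flags;
    uint32_t endpoint;
    uint64_t address;
};

static void handle_fault(const struct fault_record *f)
{
    const char *why;

    switch (f->reason) {
    case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
        why = "endpoint unknown to the device";
        break;
    case VIRTIO_IOMMU_FAULT_R_DOMAIN:
        why = "endpoint not attached to any domain";
        break;
    case VIRTIO_IOMMU_FAULT_R_MAPPING:
        why = "missing mapping or permission error";
        break;
    default:
        why = "unrecognized fault reason";
        break;
    }

    /* The address field is only meaningful when F_ADDRESS is set. */
    if (f->flags & VIRTIO_IOMMU_FAULT_F_ADDRESS) {
        printf("iommu fault: %s, endpoint=%u, addr=0x%" PRIx64 " [%s%s]\n",
               why, f->endpoint, f->address,
               (f->flags & VIRTIO_IOMMU_FAULT_F_READ) ? "R" : "",
               (f->flags & VIRTIO_IOMMU_FAULT_F_WRITE) ? "W" : "");
    } else {
        printf("iommu fault: %s, endpoint=%u\n", why, f->endpoint);
    }
}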