In the current implementation, vhost will crash with a segmentation fault if a malicious or buggy virtio application supplies broken (unmappable) descriptor addresses.
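
To illustrate the failure mode and the check that prevents it, here is a minimal standalone sketch. It is hypothetical: 'struct mem_region', the local 'gpa_to_vva()' and 'copy_from_desc()' below are simplified stand-ins for the librte_vhost internals, not the actual code:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified guest memory region (illustrative, not the vhost struct). */
struct mem_region {
	uint64_t guest_phys_addr;	/* start in guest physical space */
	uint64_t size;			/* region length in bytes */
	uint64_t host_virt_addr;	/* where vhost has it mapped */
};

/* Translate a guest physical address to a vhost virtual address.
 * Returns 0 when the guest-supplied address maps to no known region. */
static uint64_t
gpa_to_vva(const struct mem_region *regions, size_t nregions, uint64_t gpa)
{
	for (size_t i = 0; i < nregions; i++) {
		if (gpa >= regions[i].guest_phys_addr &&
		    gpa - regions[i].guest_phys_addr < regions[i].size)
			return gpa - regions[i].guest_phys_addr +
			       regions[i].host_virt_addr;
	}
	return 0;
}

/* Copy from a guest descriptor; returns -1 on a bad address. */
static int
copy_from_desc(const struct mem_region *regions, size_t nregions,
	       uint64_t gpa, void *dst, size_t len)
{
	uint64_t vva = gpa_to_vva(regions, nregions, gpa);

	if (!vva)		/* without this check, the memcpy() below */
		return -1;	/* dereferences an arbitrary pointer */

	memcpy(dst, (void *)(uintptr_t)vva, len);
	return 0;
}

int
main(void)
{
	struct mem_region regions[1] = { {
		.guest_phys_addr = 0x1000,
		.size            = 0x1000,
		.host_virt_addr  = (uint64_t)(uintptr_t)calloc(1, 0x1000),
	} };
	char buf[8];

	if (copy_from_desc(regions, 1, 0x1000, buf, sizeof(buf)) == 0)
		puts("valid address: copied");
	/* A malicious or buggy guest address is rejected, not dereferenced: */
	if (copy_from_desc(regions, 1, 0xdead0000, buf, sizeof(buf)) < 0)
		puts("bad address: rejected, no crash");
	return 0;
}

Without the '!vva' check, a guest that places an unmappable address in a descriptor steers the copy through an arbitrary pointer inside the vhost process, which is exactly the segmentation fault described above.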
Before commit 0823c1cb0a73, this crash was reproducible even with a normal DPDK application that tried to change the number of virtqueues dynamically inside the VM.

Fix this by checking the addresses of descriptors before using them. Also fix the error return value of 'copy_mbuf_to_desc_mergeable()' from '-1' to '0', because the function returns an unsigned value that means the number of used descriptors.

Signed-off-by: Ilya Maximets <i.maximets at samsung.com>
---
Version 2:
	* Rebased on top of current master.
	* Host's address is now checked in the mergeable case, because the
	  needed refactoring is already done.
	* Commit message changed because the old issue with virtio reload
	  was accidentally fixed by commit 0823c1cb0a73.

 lib/librte_vhost/vhost_rxtx.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 15ca956..31e8b58 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -147,10 +147,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 
 	desc = &vq->desc[desc_idx];
-	if (unlikely(desc->len < dev->vhost_hlen))
+	desc_addr = gpa_to_vva(dev, desc->addr);
+	if (unlikely(desc->len < dev->vhost_hlen || !desc_addr))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, desc->addr);
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 	virtio_enqueue_offload(m, &virtio_hdr.hdr);
@@ -182,7 +182,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				return -1;
 
 			desc = &vq->desc[desc->next];
-			desc_addr = gpa_to_vva(dev, desc->addr);
+			desc_addr = gpa_to_vva(dev, desc->addr);
+			if (unlikely(!desc_addr))
+				return -1;
+
 			desc_offset = 0;
 			desc_avail  = desc->len;
 		}
@@ -387,10 +390,10 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 		dev->vid, cur_idx, end_idx);
 
-	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen)
-		return -1;
-
 	desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
+		return 0;
+
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 	virtio_hdr.num_buffers = end_idx - start_idx;
@@ -425,6 +428,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 			vec_idx++;
 			desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+			if (unlikely(!desc_addr))
+				return 0;
 
 			/* Prefetch buffer address. */
 			rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -507,7 +512,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		*(volatile uint16_t *)&vq->used->idx += nr_used;
 		vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
 			sizeof(vq->used->idx));
-		vq->last_used_idx = end;
+		vq->last_used_idx += nr_used;
 	}
 
 	if (likely(pkt_idx)) {
@@ -688,6 +693,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		return -1;
 
 	desc_addr = gpa_to_vva(dev, desc->addr);
+	if (unlikely(!desc_addr))
+		return -1;
+
 	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
 	rte_prefetch0(hdr);
 
@@ -701,6 +709,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		desc = &vq->desc[desc->next];
 		desc_addr = gpa_to_vva(dev, desc->addr);
+		if (unlikely(!desc_addr))
+			return -1;
+
 		rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 		desc_offset = 0;
@@ -737,6 +748,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 			desc = &vq->desc[desc->next];
 			desc_addr = gpa_to_vva(dev, desc->addr);
+			if (unlikely(!desc_addr))
+				return -1;
+
 			rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 			desc_offset = 0;
-- 
2.7.4
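
A note on the return-value part of the fix: 'copy_mbuf_to_desc_mergeable()' returns an unsigned count of used descriptors that the caller accumulates into the used ring index, so an error path returning '-1' silently becomes a huge positive number after the implicit conversion. A hypothetical standalone illustration ('copy_to_desc_sketch()' is a stand-in, not the actual virtio_dev_merge_rx() caller):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the fixed function: 0 used descriptors on error. */
static uint32_t
copy_to_desc_sketch(int error)
{
	return error ? 0 : 4;	/* e.g. 4 descriptors consumed on success */
}

int
main(void)
{
	uint32_t nr_used = 0;

	/* Old behaviour: '-1' converted to unsigned is UINT32_MAX, so the
	 * used index would jump by 4294967295 on every error: */
	nr_used += (uint32_t)-1;
	printf("buggy: nr_used = %u\n", nr_used);

	/* Fixed behaviour: the index simply does not advance on error. */
	nr_used = 0;
	nr_used += copy_to_desc_sketch(1);
	printf("fixed: nr_used = %u\n", nr_used);
	return 0;
}

This is why the mergeable error paths in the diff return '0' while the non-mergeable 'copy_mbuf_to_desc()' and 'copy_desc_to_mbuf()', whose callers treat the result as a signed status, keep returning '-1'.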