When a packet receive failure and a full DMA ring occur simultaneously in async vhost, slot_idx needs to be decremented by 1. For packed virtqueues, if slot_idx is currently 0, the slot index should wrap to ring_size - 1. Decrementing the index and relying on "slot_idx % vq->size" is not enough, since unsigned underflow only wraps to ring_size - 1 when the ring size is a power of 2, which a packed ring size is not guaranteed to be.
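The wrap-around can be illustrated with a small standalone sketch (illustration only, not part of the patch; ring_idx_dec is a hypothetical helper name, and 384 is just an example non-power-of-2 ring size):

    #include <stdint.h>
    #include <assert.h>

    /* Hypothetical helper: decrement a ring index with an explicit
     * wrap-around check. Relying on uint16_t underflow plus a modulo
     * is only correct for power-of-2 ring sizes, because 65535 % size
     * equals size - 1 only when size divides 65536.
     */
    static inline uint16_t
    ring_idx_dec(uint16_t idx, uint16_t ring_size)
    {
    	return (idx == 0) ? (uint16_t)(ring_size - 1) : (uint16_t)(idx - 1);
    }

    int
    main(void)
    {
    	/* explicit wrap is correct for any ring size */
    	assert(ring_idx_dec(0, 384) == 383);
    	assert(ring_idx_dec(5, 384) == 4);
    	/* underflow plus modulo gives 65535 % 384 == 255, not 383 */
    	assert((uint16_t)(65535 % 384) != 383);
    	return 0;
    }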
Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
---
 lib/vhost/virtio_net.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index bfc6d65b7c..f804bce0bd 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3462,6 +3462,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			allocerr_warned = true;
 		}
 		dropped = true;
+		slot_idx--;
 		break;
 	}
 
@@ -3652,6 +3653,12 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
 				slot_idx, legacy_ol_flags))) {
 			rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
+
+			if (slot_idx == 0)
+				slot_idx = vq->size - 1;
+			else
+				slot_idx--;
+
 			break;
 		}
 
@@ -3679,8 +3686,13 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		async->buffer_idx_packed += vq->size - pkt_err;
 
 		while (pkt_err-- > 0) {
-			rte_pktmbuf_free(pkts_info[slot_idx % vq->size].mbuf);
-			slot_idx--;
+			rte_pktmbuf_free(pkts_info[slot_idx].mbuf);
+			descs_err += pkts_info[slot_idx].descs;
+
+			if (slot_idx == 0)
+				slot_idx = vq->size - 1;
+			else
+				slot_idx--;
 		}
 
 		/* recover available ring */
-- 
2.35.1