The vhost enqueue function now tries to handle packed descriptors four at
a time, i.e. one cacheline of descriptors per iteration. If a batch can't
be handled by the fast path, the normal path is tried instead. The loop
breaks out when even the normal path can't emit a packet.
Signed-off-by: Marvin Liu <yong....@intel.com>

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index cd51ed47a..a3b1e85fe 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1252,49 +1252,37 @@ virtio_dev_rx_normal_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
-	struct rte_mbuf **pkts, uint32_t count)
+		     struct rte_mbuf **pkts, uint32_t count)
 {
 	uint32_t pkt_idx = 0;
-	uint16_t num_buffers;
-	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint32_t pkt_num;
+	uint32_t remained = count;
+	int ret;
 
-	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
-		uint16_t nr_vec = 0;
-		uint16_t nr_descs = 0;
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx += pkt_num,
+			remained -= pkt_num) {
+		if (remained >= PACKED_DESC_PER_CACHELINE) {
+			ret = virtio_dev_rx_fast_packed(dev, vq, pkts);
 
-		if (unlikely(reserve_avail_buf_packed(dev, vq,
-						pkt_len, buf_vec, &nr_vec,
-						&num_buffers, &nr_descs) < 0)) {
-			VHOST_LOG_DEBUG(VHOST_DATA,
-				"(%d) failed to get enough desc from vring\n",
-				dev->vid);
-			vq->shadow_used_idx -= num_buffers;
-			break;
+			if (!ret) {
+				pkt_num = PACKED_DESC_PER_CACHELINE;
+				continue;
+			}
 		}
 
-		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
-			dev->vid, vq->last_avail_idx,
-			vq->last_avail_idx + num_buffers);
+		pkt_num = virtio_dev_rx_normal_packed(dev, vq, pkts[pkt_idx]);
 
-		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
-						buf_vec, nr_vec,
-						num_buffers) < 0) {
-			vq->shadow_used_idx -= num_buffers;
+		if (pkt_num == 0)
 			break;
-		}
 
-		vq->last_avail_idx += nr_descs;
-		if (vq->last_avail_idx >= vq->size) {
-			vq->last_avail_idx -= vq->size;
-			vq->avail_wrap_counter ^= 1;
-		}
 	}
 
-	do_data_copy_enqueue(dev, vq);
+	if (pkt_idx) {
+		if (vq->shadow_used_idx) {
+			do_data_copy_enqueue(dev, vq);
+			flush_enqueue_used_packed(dev, vq);
+		}
 
-	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_used_ring_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
-- 
2.17.1
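For readers following the thread, below is a minimal, self-contained sketch
of the loop structure this patch introduces. fast_path() and normal_path()
are hypothetical stand-ins for virtio_dev_rx_fast_packed() and
virtio_dev_rx_normal_packed(), stubbed with the return conventions the diff
implies (zero on a successful four-packet batch, and the number of packets
enqueued, respectively); all descriptor handling is omitted, so this models
only the control flow, not the real vhost API.

#include <stdint.h>
#include <stdio.h>

/* Four 16-byte packed descriptors fit in one 64-byte cacheline. */
#define PACKED_DESC_PER_CACHELINE 4

/* Stub for virtio_dev_rx_fast_packed(): returns 0 when a full
 * batch of four packets was enqueued, non-zero otherwise. */
static int
fast_path(int *pkts)
{
	(void)pkts;
	return 0; /* always succeeds in this toy model */
}

/* Stub for virtio_dev_rx_normal_packed(): returns the number of
 * packets enqueued (1), or 0 when no descriptor is available. */
static uint32_t
normal_path(int pkt)
{
	(void)pkt;
	return 1;
}

static uint32_t
rx_packed(int *pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t pkt_num;
	uint32_t remained = count;

	for (pkt_idx = 0; pkt_idx < count; pkt_idx += pkt_num,
			remained -= pkt_num) {
		/* Take the batched path while at least one full
		 * cacheline of descriptors remains. */
		if (remained >= PACKED_DESC_PER_CACHELINE &&
				fast_path(&pkts[pkt_idx]) == 0) {
			pkt_num = PACKED_DESC_PER_CACHELINE;
			continue;
		}

		/* Fall back to enqueueing a single packet. */
		pkt_num = normal_path(pkts[pkt_idx]);
		if (pkt_num == 0)
			break; /* ring exhausted, stop early */
	}

	return pkt_idx; /* packets actually enqueued */
}

int
main(void)
{
	int pkts[10] = { 0 };

	printf("enqueued %u of 10\n", rx_packed(pkts, 10));
	return 0;
}

With count = 10 the sketch takes the batched path twice (8 packets) and
then the single-packet path twice, which mirrors how the patched
virtio_dev_rx_packed() mixes the two paths within one burst.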