This patch introduces a new per-virtqueue mbuf allocation failure statistic. It can be useful to troubleshoot packet drops due to insufficient mempool size or memory leaks.
Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com> --- lib/vhost/vhost.c | 1 + lib/vhost/vhost.h | 1 + lib/vhost/virtio_net.c | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index 5912a42979..ac71d17784 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -55,6 +55,7 @@ static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = { {"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)}, {"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)}, {"inflight_completed", offsetof(struct vhost_virtqueue, stats.inflight_completed)}, + {"mbuf_alloc_failed", offsetof(struct vhost_virtqueue, stats.mbuf_alloc_failed)}, }; #define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings) diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index 470dadbba6..371c3e3858 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -156,6 +156,7 @@ struct virtqueue_stats { uint64_t iotlb_misses; uint64_t inflight_submitted; uint64_t inflight_completed; + uint64_t mbuf_alloc_failed; uint64_t guest_notifications_suppressed; /* Counters below are atomic, and should be incremented as such. 
*/ RTE_ATOMIC(uint64_t) guest_notifications; diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index db9985c9b9..b056c83d8f 100644 --- a/lib/vhost/virtio_net.c +++ b/lib/vhost/virtio_net.c @@ -2975,6 +2975,7 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, if (mbuf_avail == 0) { cur = rte_pktmbuf_alloc(mbuf_pool); if (unlikely(cur == NULL)) { + vq->stats.mbuf_alloc_failed++; VHOST_DATA_LOG(dev->ifname, ERR, "failed to allocate memory for mbuf."); goto error; @@ -3103,8 +3104,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, count = RTE_MIN(count, avail_entries); VHOST_DATA_LOG(dev->ifname, DEBUG, "about to dequeue %u buffers", count); - if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) + if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) { + vq->stats.mbuf_alloc_failed += count; return 0; + } for (i = 0; i < count; i++) { struct buf_vector buf_vec[BUF_VECTOR_MAX]; @@ -3481,8 +3484,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, { uint32_t pkt_idx = 0; - if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) + if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) { + vq->stats.mbuf_alloc_failed += count; return 0; + } do { rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]); @@ -3729,8 +3734,10 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, count = RTE_MIN(count, avail_entries); VHOST_DATA_LOG(dev->ifname, DEBUG, "about to dequeue %u buffers", count); - if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) + if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) { + vq->stats.mbuf_alloc_failed += count; goto out; + } for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { uint16_t head_idx = 0; @@ -4019,8 +4026,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, async_iter_reset(async); - if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) + if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count)) { + 
vq->stats.mbuf_alloc_failed += count; goto out; + } do { struct rte_mbuf *pkt = pkts_prealloc[pkt_idx]; -- 2.43.0