On Tue, Aug 27, 2019 at 06:24:06PM +0800, Marvin Liu wrote: > When doing xmit in-order enqueue, packets are buffered and then flushed > into avail ring. It has possibility that no free room in avail ring, > thus some buffered packets can't be transmitted. So move stats update > just after successful avail ring updates. > > Signed-off-by: Marvin Liu <yong....@intel.com> > --- > drivers/net/virtio/virtio_rxtx.c | 86 ++++++++++++++++---------------- > 1 file changed, 43 insertions(+), 43 deletions(-) > > diff --git a/drivers/net/virtio/virtio_rxtx.c > b/drivers/net/virtio/virtio_rxtx.c > index 27ead19fb..5d4ed524e 100644 > --- a/drivers/net/virtio/virtio_rxtx.c > +++ b/drivers/net/virtio/virtio_rxtx.c > @@ -575,6 +575,48 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr, > } > } > > +static inline void > +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf > *mbuf) > +{ > + uint32_t s = mbuf->pkt_len; > + struct rte_ether_addr *ea; > + > + stats->bytes += s; > + > + if (s == 64) { > + stats->size_bins[1]++; > + } else if (s > 64 && s < 1024) { > + uint32_t bin; > + > + /* count zeros, and offset into correct bin */ > + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; > + stats->size_bins[bin]++; > + } else { > + if (s < 64) > + stats->size_bins[0]++; > + else if (s < 1519) > + stats->size_bins[6]++; > + else > + stats->size_bins[7]++; > + } > + > + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *); > + if (rte_is_multicast_ether_addr(ea)) { > + if (rte_is_broadcast_ether_addr(ea)) > + stats->broadcast++; > + else > + stats->multicast++; > + } > +} > + > +static inline void > +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m) > +{ > + VIRTIO_DUMP_PACKET(m, m->data_len); > + > + virtio_update_packet_stats(&rxvq->stats, m); > +}
If we are moving the above helpers anyway, it would be better to just move them to the top of this file. Thanks, Tiwei