Add basic stats support for DQO: per-queue Rx/Tx packet, byte and error
counters, plus Rx mbuf allocation failure accounting.

Signed-off-by: Junfeng Guo <junfeng....@intel.com>
Signed-off-by: Rushil Gupta <rush...@google.com>
Signed-off-by: Joshua Washington <joshw...@google.com>
Signed-off-by: Jeroen de Borst <jeroe...@google.com>
---
 drivers/net/gve/gve_ethdev.c |  5 ++++-
 drivers/net/gve/gve_rx_dqo.c | 14 +++++++++++++-
 drivers/net/gve/gve_tx_dqo.c |  7 +++++++
 3 files changed, 24 insertions(+), 2 deletions(-)
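
For context, a minimal sketch of how the per-queue counters updated by this
patch (rxq->stats / txq->stats) are typically folded into rte_eth_stats by a
PMD's stats_get callback. The callback below is illustrative only and assumed,
not code from this series; only the stats field names mirror the patch.

/*
 * Illustrative aggregation sketch (assumed, not part of this patch).
 * Assumes the driver's usual headers (e.g. gve_ethdev.h, ethdev_driver.h)
 * are in scope for struct gve_rx_queue / gve_tx_queue and rte_eth_stats.
 */
static int
example_gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq == NULL)
			continue;
		/* Counters incremented in gve_rx_burst_dqo()/gve_refill_dqo() */
		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->rx_nombuf += rxq->stats.no_mbufs;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;
		/* Counters incremented in gve_tx_burst_dqo() */
		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;
	}

	return 0;
}

With such a callback wired into eth_dev_ops, the aggregated counters would be
visible through rte_eth_stats_get() or testpmd's "show port stats all".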

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index a532b8a93a..8b6861a24f 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -150,14 +150,17 @@ gve_refill_dqo(struct gve_rx_queue *rxq)
 
        diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
        if (diag < 0) {
+               rxq->stats.no_mbufs_bulk++;
                for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
                        nmb = rte_pktmbuf_alloc(rxq->mpool);
                        if (!nmb)
                                break;
                        rxq->sw_ring[i] = nmb;
                }
-               if (i < rxq->nb_rx_desc - 1)
+               if (i < rxq->nb_rx_desc - 1) {
+                       rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
                        return -ENOMEM;
+               }
        }
 
        for (i = 0; i < rxq->nb_rx_desc; i++) {
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index d0eaea9c24..1d6b21359c 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -39,6 +39,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
                        next_avail = 0;
                        rxq->nb_rx_hold -= delta;
                } else {
+                       rxq->stats.no_mbufs_bulk++;
+                       rxq->stats.no_mbufs += nb_desc - next_avail;
                        dev = &rte_eth_devices[rxq->port_id];
                        dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
                       PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -59,6 +61,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
                        next_avail += nb_refill;
                        rxq->nb_rx_hold -= nb_refill;
                } else {
+                       rxq->stats.no_mbufs_bulk++;
+                       rxq->stats.no_mbufs += nb_desc - next_avail;
                        dev = &rte_eth_devices[rxq->port_id];
                        dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
                       PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -82,7 +86,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        uint16_t pkt_len;
        uint16_t rx_id;
        uint16_t nb_rx;
+       uint64_t bytes;
 
+       bytes = 0;
        nb_rx = 0;
        rxq = rx_queue;
        rx_id = rxq->rx_tail;
@@ -96,8 +102,10 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (rx_desc->generation != rxq->cur_gen_bit)
                        break;
 
-               if (unlikely(rx_desc->rx_error))
+               if (unlikely(rx_desc->rx_error)) {
+                       rxq->stats.errors++;
                        continue;
+               }
 
                pkt_len = rx_desc->packet_len;
 
@@ -122,6 +130,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
 
                rx_pkts[nb_rx++] = rxm;
+               bytes += pkt_len;
        }
 
        if (nb_rx > 0) {
@@ -130,6 +139,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        rxq->next_avail = rx_id_bufq;
 
                gve_rx_refill_dqo(rxq);
+
+               rxq->stats.packets += nb_rx;
+               rxq->stats.bytes += bytes;
        }
 
        return nb_rx;
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 2ea38a8f8e..578a409616 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -81,10 +81,12 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint16_t nb_used;
        uint16_t tx_id;
        uint16_t sw_id;
+       uint64_t bytes;
 
        sw_ring = txq->sw_ring;
        txr = txq->tx_ring;
 
+       bytes = 0;
        mask = txq->nb_tx_desc - 1;
        sw_mask = txq->sw_size - 1;
        tx_id = txq->tx_tail;
@@ -119,6 +121,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        tx_id = (tx_id + 1) & mask;
                        sw_id = (sw_id + 1) & sw_mask;
 
+                       bytes += tx_pkt->pkt_len;
                        tx_pkt = tx_pkt->next;
                } while (tx_pkt);
 
@@ -142,6 +145,10 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                rte_write32(tx_id, txq->qtx_tail);
                txq->tx_tail = tx_id;
                txq->sw_tail = sw_id;
+
+               txq->stats.packets += nb_tx;
+               txq->stats.bytes += bytes;
+               txq->stats.errors += nb_pkts - nb_tx;
        }
 
        return nb_tx;
-- 
2.34.1
