net/gve: add basic and extended statistics

Add per-queue packet, byte, error and mbuf-allocation-failure counters
to the Rx/Tx paths of the Google Virtual NIC (gve) PMD and report them
through the stats_get/stats_reset and
xstats_get/xstats_get_names/xstats_reset dev ops. Also rename the
dev_infos_get and mtu_set callbacks to match the ops they implement.

Signed-off-by: Levend Sayar <levendsa...@gmail.com>
---
 drivers/net/gve/gve_ethdev.c | 147 ++++++++++++++++++++++++++++++++++-
 drivers/net/gve/gve_ethdev.h |  11 +++
 drivers/net/gve/gve_rx.c     |  18 ++++-
 drivers/net/gve/gve_tx.c     |   6 ++
 4 files changed, 175 insertions(+), 7 deletions(-)

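Note for reviewers (not part of the commit): a minimal sketch of how an
application could read the new counters through the generic ethdev API.
The helper name print_gve_stats and the assumption that port_id refers to
an initialized and started gve port are illustrative only.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>

/* Dump the aggregate and per-queue counters added by this patch. */
static void
print_gve_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *xstats;
        int nb, i;

        /* Aggregate counters filled by gve_stats_get() */
        if (rte_eth_stats_get(port_id, &stats) == 0)
                printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " mbuf alloc failures\n"
                       "tx: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
                       stats.ipackets, stats.ibytes, stats.rx_nombuf,
                       stats.opackets, stats.obytes);

        /* Per-queue counters exposed by gve_xstats_get() */
        nb = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (nb <= 0)
                return;

        names = calloc(nb, sizeof(*names));
        xstats = calloc(nb, sizeof(*xstats));
        if (names != NULL && xstats != NULL &&
            rte_eth_xstats_get_names(port_id, names, nb) == nb &&
            rte_eth_xstats_get(port_id, xstats, nb) == nb) {
                for (i = 0; i < nb; i++)
                        printf("%s: %" PRIu64 "\n",
                               names[xstats[i].id].name, xstats[i].value);
        }

        free(names);
        free(xstats);
}

Since gve_xstats_get() assigns sequential ids, xstats[i].id indexes
directly into the name array returned by rte_eth_xstats_get_names().
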
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index e357f16e16..735847ede7 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
 }
 
 static int
-gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct gve_priv *priv = dev->data->dev_private;
 
@@ -319,7 +319,7 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 }
 
 static int
-gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct gve_priv *priv = dev->data->dev_private;
        int err;
@@ -345,18 +345,157 @@ gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        return 0;
 }
 
+static int
+gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       uint16_t i;
+       struct rte_eth_stats tmp;
+
+       memset(&tmp, 0, sizeof(tmp));
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+               tmp.ipackets += rxq->packets;
+               tmp.ibytes += rxq->bytes;
+               tmp.ierrors += rxq->errors;
+               tmp.rx_nombuf += rxq->no_mbufs;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct gve_tx_queue *txq = dev->data->tx_queues[i];
+               tmp.opackets += txq->packets;
+               tmp.obytes += txq->bytes;
+               tmp.oerrors += txq->errors;
+       }
+
+       *stats = tmp;
+       return 0;
+}
+
+static int
+gve_stats_reset(struct rte_eth_dev *dev)
+{
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+               rxq->packets = 0;
+               rxq->bytes = 0;
+               rxq->errors = 0;
+               rxq->no_mbufs = 0;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct gve_tx_queue *txq = dev->data->tx_queues[i];
+               txq->packets = 0;
+               txq->bytes = 0;
+               txq->errors = 0;
+       }
+
+       return 0;
+}
+
+static int
+gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n)
+{
+       if (xstats) {
+               unsigned int requested = n;
+               uint64_t indx = 0;
+               struct rte_eth_xstat *xstat = xstats;
+               uint16_t i;
+
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+                       xstat->id = indx++;
+                       xstat->value = rxq->packets;
+                       if (--requested == 0)
+                               return n;
+                       xstat++;
+
+                       xstat->id = indx++;
+                       xstat->value = rxq->bytes;
+                       if (--requested == 0)
+                               return n;
+                       xstat++;
+               }
+
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       struct gve_tx_queue *txq = dev->data->tx_queues[i];
+                       xstat->id = indx++;
+                       xstat->value = txq->packets;
+                       if (--requested == 0)
+                               return n;
+                       xstat++;
+
+                       xstat->id = indx++;
+                       xstat->value = txq->bytes;
+                       if (--requested == 0)
+                               return n;
+                       xstat++;
+               }
+       }
+
+       return (dev->data->nb_tx_queues + dev->data->nb_rx_queues) * 2;
+}
+
+static int
+gve_xstats_reset(struct rte_eth_dev *dev)
+{
+       return gve_stats_reset(dev);
+}
+
+static int
+gve_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names,
+                                               unsigned int n)
+{
+       if (xstats_names) {
+               unsigned int requested = n;
+               struct rte_eth_xstat_name *xstats_name = xstats_names;
+               uint16_t i;
+
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       snprintf(xstats_name->name, sizeof(xstats_name->name), "rx_q%d_packets", i);
+                       if (--requested == 0)
+                               return n;
+                       xstats_name++;
+                       snprintf(xstats_name->name, sizeof(xstats_name->name), "rx_q%d_bytes", i);
+                       if (--requested == 0)
+                               return n;
+                       xstats_name++;
+               }
+
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       snprintf(xstats_name->name, sizeof(xstats_name->name), "tx_q%d_packets", i);
+                       if (--requested == 0)
+                               return n;
+                       xstats_name++;
+                       snprintf(xstats_name->name, sizeof(xstats_name->name), "tx_q%d_bytes", i);
+                       if (--requested == 0)
+                               return n;
+                       xstats_name++;
+               }
+       }
+
+       return (dev->data->nb_tx_queues + dev->data->nb_rx_queues) * 2;
+}
+
 static const struct eth_dev_ops gve_eth_dev_ops = {
        .dev_configure        = gve_dev_configure,
        .dev_start            = gve_dev_start,
        .dev_stop             = gve_dev_stop,
        .dev_close            = gve_dev_close,
-       .dev_infos_get        = gve_dev_info_get,
+       .dev_infos_get        = gve_dev_infos_get,
        .rx_queue_setup       = gve_rx_queue_setup,
        .tx_queue_setup       = gve_tx_queue_setup,
        .rx_queue_release     = gve_rx_queue_release,
        .tx_queue_release     = gve_tx_queue_release,
        .link_update          = gve_link_update,
-       .mtu_set              = gve_dev_mtu_set,
+       .mtu_set              = gve_mtu_set,
+       .stats_get            = gve_stats_get,
+       .stats_reset          = gve_stats_reset,
+       .xstats_get           = gve_xstats_get,
+       .xstats_reset         = gve_xstats_reset,
+       .xstats_get_names     = gve_xstats_get_names,
 };
 
 static void
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 235e55899e..70210570a2 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -81,6 +81,11 @@ struct gve_tx_queue {
        uint32_t next_to_clean;
        uint16_t free_thresh;
 
+       /* stats */
+       uint64_t packets;
+       uint64_t bytes;
+       uint64_t errors;
+
        /* Only valid for DQO_QPL queue format */
        uint16_t sw_tail;
        uint16_t sw_ntc;
@@ -124,6 +129,12 @@ struct gve_rx_queue {
        uint32_t next_avail;
        uint32_t nb_avail;
 
+       /* stats */
+       uint64_t packets;
+       uint64_t bytes;
+       uint64_t errors;
+       uint64_t no_mbufs;
+
        volatile rte_be32_t *qrx_tail;
        volatile rte_be32_t *ntfy_addr;
 
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 518c9d109c..4643f9f47a 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
                if (diag < 0) {
                        for (i = 0; i < nb_alloc; i++) {
                                nmb = rte_pktmbuf_alloc(rxq->mpool);
-                               if (!nmb)
+                               if (!nmb) {
+                                       rxq->no_mbufs++;
                                        break;
+                               }
                                rxq->sw_ring[idx + i] = nmb;
                        }
                        if (i != nb_alloc)
@@ -55,8 +57,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
                if (diag < 0) {
                        for (i = 0; i < nb_alloc; i++) {
                                nmb = rte_pktmbuf_alloc(rxq->mpool);
-                               if (!nmb)
+                               if (!nmb) {
+                                       rxq->no_mbufs++;
                                        break;
+                               }
                                rxq->sw_ring[idx + i] = nmb;
                        }
                        nb_alloc = i;
@@ -90,6 +94,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        uint16_t nb_rx, len;
        uint64_t addr;
        uint16_t i;
+       uint64_t total_len = 0;
 
        rxr = rxq->rx_desc_ring;
        nb_rx = 0;
@@ -99,10 +104,13 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
                        break;
 
-               if (rxd->flags_seq & GVE_RXF_ERR)
+               if (rxd->flags_seq & GVE_RXF_ERR) {
+                       rxq->errors++;
                        continue;
+               }
 
                len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
+               total_len += len;
                rxe = rxq->sw_ring[rx_id];
                if (rxq->is_gqi_qpl) {
                        addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
@@ -138,6 +146,10 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                nb_rx++;
        }
 
+       /* update stats */
+       rxq->packets += nb_rx;
+       rxq->bytes += total_len;
+
        rxq->nb_avail += nb_rx;
        rxq->rx_tail = rx_id;
 
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index bf4e8fea2c..d4e52e3ea5 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -262,6 +262,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint16_t nb_used, i;
        uint16_t nb_tx = 0;
        uint32_t hlen;
+       uint64_t total_len = 0;
 
        txr = txq->tx_desc_ring;
 
@@ -299,6 +300,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                hlen = ol_flags & RTE_MBUF_F_TX_TCP_SEG ?
                        (uint32_t)(tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len) :
                        tx_pkt->pkt_len;
+               total_len += tx_pkt->pkt_len;
 
                sw_ring[sw_id] = tx_pkt;
                if (!is_fifo_avail(txq, hlen)) {
@@ -364,6 +366,10 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                txq->sw_tail = sw_id;
        }
 
+       /* update stats */
+       txq->packets += nb_tx;
+       txq->bytes += total_len;
+
        return nb_tx;
 }
 
-- 
2.37.1 (Apple Git-137.1)
