With virtual hardware version 6, the maximum number of RX queues was
increased from VMXNET3_MAX_RX_QUEUES (16) to VMXNET3_EXT_MAX_RX_QUEUES
(32). Similarly, the maximum number of TX queues was increased from
VMXNET3_MAX_TX_QUEUES (8) to VMXNET3_EXT_MAX_TX_QUEUES (32).
These increases were not fully accounted for in the PMD.

The number of queues for which statistics are provided
(RTE_ETHDEV_QUEUE_STAT_CNTRS) can be smaller than the driver's maximum
number of supported queues for virtual hardware version 6
(VMXNET3_EXT_MAX_RX_QUEUES, 32), causing out-of-bounds accesses to the
queue stats arrays when the application uses more than
RTE_ETHDEV_QUEUE_STAT_CNTRS queues.

This patch fixes the issue with two modifications:
- Increase the stats array sizes to support virtual hardware version 6.
- Respect RTE_ETHDEV_QUEUE_STAT_CNTRS when providing the per-queue
  counters, as sketched below.
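
In outline, the second modification keeps accumulating the device
totals over every queue but only writes the per-queue counters that
fit in the stats arrays. A minimal standalone sketch of that pattern
(QUEUE_STAT_CNTRS and NUM_TX_QUEUES are assumed stand-ins, not the
driver's code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real build configuration. */
    #define QUEUE_STAT_CNTRS 16 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default */
    #define NUM_TX_QUEUES    32 /* VMXNET3_EXT_MAX_TX_QUEUES */

    int main(void)
    {
        uint64_t q_opackets[QUEUE_STAT_CNTRS] = {0};
        uint64_t opackets = 0;
        unsigned int i;

        for (i = 0; i < NUM_TX_QUEUES; i++) {
            uint64_t packets = 1; /* placeholder for the HW counters */

            opackets += packets; /* totals still cover every queue */
            if (i < QUEUE_STAT_CNTRS)
                q_opackets[i] = packets; /* per-queue only in bounds */
        }

        printf("total %" PRIu64 " (q_opackets[0]=%" PRIu64 "), "
               "per-queue counters kept for %d of %d queues\n",
               opackets, q_opackets[0], QUEUE_STAT_CNTRS, NUM_TX_QUEUES);
        return 0;
    }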

The build-time check
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES)
has become irrelevant, so it is removed. With this removal, per-queue
stats for fewer queues than the driver's maximum are now supported.
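
For context, RTE_BUILD_BUG_ON() aborts compilation when its condition
is true, so the removed check required RTE_ETHDEV_QUEUE_STAT_CNTRS to
be at least VMXNET3_MAX_TX_QUEUES at build time. A hedged standalone
sketch of such a guard (BUILD_BUG_ON and the values here are
illustrative stand-ins; the exact macro body differs across DPDK
releases):

    /* Negative-size-array trick: the build fails when cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    #define QUEUE_STAT_CNTRS 16 /* RTE_ETHDEV_QUEUE_STAT_CNTRS default */
    #define MAX_TX_QUEUES     8 /* VMXNET3_MAX_TX_QUEUES */

    int main(void)
    {
        /* Compiles with the pre-v6 maximum (16 >= 8), but would fail
         * with VMXNET3_EXT_MAX_TX_QUEUES (32); hence the runtime
         * bound replaces the compile-time requirement.
         */
        BUILD_BUG_ON(QUEUE_STAT_CNTRS < MAX_TX_QUEUES);
        return 0;
    }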

Fixes: b1584dd0affe ("net/vmxnet3: support version 6")

Signed-off-by: Morten Brørup <m...@smartsharesystems.com>
---
v3:
* This is a bugfix, not just an improvement. (Ferruh Yigit)
* Moved support for larger MTU with virtual hardware version 6 to separate
  patch. (Ferruh Yigit)
v2:
* Virtual hardware version 6 supports more queues; updated some arrays
  accordingly.
* Added support for larger MTU with virtual hardware version 6.
---
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 32 +++++++++++++++++-----------
 drivers/net/vmxnet3/vmxnet3_ethdev.h |  4 ++--
 2 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 78fac63ab6..8a9bb452c6 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1470,42 +1470,52 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        struct vmxnet3_hw *hw = dev->data->dev_private;
        struct UPT1_TxStats txStats;
        struct UPT1_RxStats rxStats;
+       uint64_t packets, bytes;
 
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 
        for (i = 0; i < hw->num_tx_queues; i++) {
                vmxnet3_tx_stats_get(hw, i, &txStats);
 
-               stats->q_opackets[i] = txStats.ucastPktsTxOK +
+               packets = txStats.ucastPktsTxOK +
                        txStats.mcastPktsTxOK +
                        txStats.bcastPktsTxOK;
 
-               stats->q_obytes[i] = txStats.ucastBytesTxOK +
+               bytes = txStats.ucastBytesTxOK +
                        txStats.mcastBytesTxOK +
                        txStats.bcastBytesTxOK;
 
-               stats->opackets += stats->q_opackets[i];
-               stats->obytes += stats->q_obytes[i];
+               stats->opackets += packets;
+               stats->obytes += bytes;
                stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
+
+               if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+                       stats->q_opackets[i] = packets;
+                       stats->q_obytes[i] = bytes;
+               }
        }
 
        for (i = 0; i < hw->num_rx_queues; i++) {
                vmxnet3_rx_stats_get(hw, i, &rxStats);
 
-               stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
+               packets = rxStats.ucastPktsRxOK +
                        rxStats.mcastPktsRxOK +
                        rxStats.bcastPktsRxOK;
 
-               stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
+               bytes = rxStats.ucastBytesRxOK +
                        rxStats.mcastBytesRxOK +
                        rxStats.bcastBytesRxOK;
 
-               stats->ipackets += stats->q_ipackets[i];
-               stats->ibytes += stats->q_ibytes[i];
-
-               stats->q_errors[i] = rxStats.pktsRxError;
+               stats->ipackets += packets;
+               stats->ibytes += bytes;
                stats->ierrors += rxStats.pktsRxError;
                stats->imissed += rxStats.pktsRxOutOfBuf;
+
+               if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+                       stats->q_ipackets[i] = packets;
+                       stats->q_ibytes[i] = bytes;
+                       stats->q_errors[i] = rxStats.pktsRxError;
+               }
        }
 
        return 0;
@@ -1521,8 +1531,6 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
 
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 
-       RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
-
        for (i = 0; i < hw->num_tx_queues; i++) {
                vmxnet3_hw_tx_stats_get(hw, i, &txStats);
                memcpy(&hw->snapshot_tx_stats[i], &txStats,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 2b3e2c4caa..e9ded6663d 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -121,8 +121,8 @@ struct vmxnet3_hw {
 #define VMXNET3_VFT_TABLE_SIZE     (VMXNET3_VFT_SIZE * sizeof(uint32_t))
        UPT1_TxStats          saved_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES];
        UPT1_RxStats          saved_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES];
-       UPT1_TxStats          snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES];
-       UPT1_RxStats          snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES];
+       UPT1_TxStats          snapshot_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES];
+       UPT1_RxStats          snapshot_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES];
        uint16_t              tx_prod_offset;
        uint16_t              rx_prod_offset[2];
        /* device capability bit map */
-- 
2.43.0
