Currently, the only way to enable diagnostics for the Tx path is to modify
the application source code, which makes faults difficult to diagnose.

This patch introduces the devarg option "mbuf_check", whose parameters
enable the corresponding diagnostics.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for a corrupted mbuf.
 2. size: check the min/max packet length against the HW spec.
 3. segment: check that the number of mbuf segments does not exceed the HW limit.
 4. offload: check for any unsupported offload flags.

Parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
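
Packets that fail a check are counted in the new "tx_mbuf_error_packets"
xstat. As an illustrative usage (not part of this patch), the counter can
be read back in testpmd with:
  testpmd> show port xstats 0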

Signed-off-by: Mingjin Ye <mingjinx...@intel.com>
---
v2: Remove call chain.
---
v3: Optimise implementation.
---
v4: Fix Windows OS compilation error.
---
v5: Split patch.
---
v6: Remove strict.
---
 doc/guides/nics/intel_vf.rst   |  4 ++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 72 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 188 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..8e39bc831c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,10 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    Enable mbuf check for Tx diagnostics by setting the devargs parameter like
+    ``-a 18:01.0,mbuf_check=[mbuf,<case1>,<case2>]`` when IAVF is backed by an
+    Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 73a089c199..6535b624cb 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
        } ierrors;
 };
 
+struct iavf_mbuf_stats {
+       uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
        struct virtchnl_eth_stats eth_stats;
        struct iavf_ipsec_crypto_stats ips_stats;
+       struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,6 +314,7 @@ struct iavf_devargs {
        uint32_t watchdog_period;
        int auto_reset;
        int no_poll_on_link_down;
+       int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -351,6 +357,11 @@ enum iavf_tx_burst_type {
        IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
        struct iavf_hw hw;
@@ -368,6 +379,7 @@ struct iavf_adapter {
        bool no_poll;
        enum iavf_rx_burst_type rx_burst_type;
        enum iavf_tx_burst_type tx_burst_type;
+       uint64_t mc_flags; /* mbuf check flags. */
        uint16_t fdir_ref_cnt;
        struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..25938b9558 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
        IAVF_RESET_WATCHDOG_ARG,
        IAVF_ENABLE_AUTO_RESET_ARG,
        IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+       IAVF_MBUF_CHECK_ARG,
        NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
        {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
        {"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
        {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+       {"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
        {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
        {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1837,6 +1842,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
        iavf_dev_stats_reset(dev);
        memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
                        sizeof(struct iavf_ipsec_crypto_stats));
+       memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+                       sizeof(struct iavf_mbuf_stats));
+
        return 0;
 }
 
@@ -1881,6 +1889,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
        int ret;
        unsigned int i;
+       struct iavf_tx_queue *txq;
+       uint64_t mbuf_errors = 0;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1914,16 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
        if (iavf_ipsec_crypto_supported(adapter))
                iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+       if (adapter->devargs.mbuf_check) {
+               for (i = 0; i < dev->data->nb_tx_queues; i++) {
+                       txq = dev->data->tx_queues[i];
+                       mbuf_errors += __atomic_exchange_n(&txq->mbuf_errors,
+                               0, __ATOMIC_RELAXED);
+               }
+               if (mbuf_errors > 0)
+                       iavf_xtats.mbuf_stats.tx_pkt_errors += mbuf_errors;
+       }
+
        /* loop over xstats array and values from pstats */
        for (i = 0; i < IAVF_NB_XSTATS; i++) {
                xstats[i].id = i;
@@ -2286,6 +2306,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
        return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+       char *cur;
+       char *tmp;
+       int str_len;
+       int valid_len;
+
+       int ret = 0;
+       uint64_t *mc_flags = args;
+       char *str2 = strdup(value);
+       if (str2 == NULL)
+               return -1;
+
+       str_len = strlen(str2);
+       if (str2[0] == '[' && str2[str_len - 1] == ']') {
+               if (str_len < 3) {
+                       ret = -1;
+                       goto mdd_end;
+               }
+               valid_len = str_len - 2;
+               memmove(str2, str2 + 1, valid_len);
+               memset(str2 + valid_len, '\0', 2);
+       }
+       cur = strtok_r(str2, ",", &tmp);
+       while (cur != NULL) {
+               if (!strcmp(cur, "mbuf"))
+                       *mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+               else if (!strcmp(cur, "size"))
+                       *mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+               else if (!strcmp(cur, "segment"))
+                       *mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+               else if (!strcmp(cur, "offload"))
+                       *mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+               else
+                       PMD_DRV_LOG(ERR, "Unsupported mbuf check type: %s", cur);
+               cur = strtok_r(NULL, ",", &tmp);
+       }
+
+mdd_end:
+       free(str2);
+       return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
        struct iavf_adapter *ad =
@@ -2340,6 +2404,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
                goto bail;
        }
 
+       ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+                                &iavf_parse_mbuf_check, &ad->mc_flags);
+       if (ret)
+               goto bail;
+
+       if (ad->mc_flags)
+               ad->devargs.mbuf_check = 1;
+
        ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
                                 &parse_bool, &ad->devargs.auto_reset);
        if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 13b932ad85..cb767fb668 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3787,6 +3787,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
                                                                tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+             uint16_t nb_pkts)
+{
+       uint16_t idx;
+       uint64_t ol_flags;
+       struct rte_mbuf *mb;
+       uint16_t good_pkts = nb_pkts;
+       const char *reason = NULL;
+       bool pkt_error = false;
+       struct iavf_tx_queue *txq = tx_queue;
+       struct iavf_adapter *adapter = txq->vsi->adapter;
+       enum iavf_tx_burst_type tx_burst_type =
+               txq->vsi->adapter->tx_burst_type;
+
+       for (idx = 0; idx < nb_pkts; idx++) {
+               mb = tx_pkts[idx];
+               ol_flags = mb->ol_flags;
+
+               if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+                       (rte_mbuf_check(mb, 1, &reason) != 0)) {
+                       PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+                       pkt_error = true;
+                       break;
+               }
+
+               if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+                       (mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+                       mb->data_len > adapter->vf.max_pkt_len)) {
+                       PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+                       "of range, reasonable range (%d - %u)\n", mb->data_len,
+                       IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+                       pkt_error = true;
+                       break;
+               }
+
+               if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+                       /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+                       if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+                               if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+                                       PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+                                       "HW limit, maximum allowed value is %d\n", mb->nb_segs,
+                                       IAVF_TX_MAX_MTU_SEG);
+                                       pkt_error = true;
+                                       break;
+                               }
+                       } else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+                               (mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+                               /* MSS outside the range is considered malicious */
+                               PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+                               "of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+                               IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+                               pkt_error = true;
+                               break;
+                       } else if (mb->nb_segs > txq->nb_tx_desc) {
+                               PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+                               "of ring length\n");
+                               pkt_error = true;
+                               break;
+                       }
+               }
+
+               if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+                       if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+                               PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+                               "is not supported\n");
+                               pkt_error = true;
+                               break;
+                       }
+
+                       if (!rte_validate_tx_offload(mb)) {
+                               PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+                               "setup error\n");
+                               pkt_error = true;
+                               break;
+                       }
+               }
+       }
+
+       if (pkt_error) {
+               __atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+               good_pkts = idx;
+               if (good_pkts == 0)
+                       return 0;
+       }
+
+       return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+                                                               tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4032,6 +4123,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        enum iavf_tx_burst_type tx_burst_type;
+       int mbuf_check = adapter->devargs.mbuf_check;
        int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
        struct iavf_tx_queue *txq;
@@ -4122,6 +4214,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
                if (no_poll_on_link_down) {
                        adapter->tx_burst_type = tx_burst_type;
                        dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+               } else if (mbuf_check) {
+                       adapter->tx_burst_type = tx_burst_type;
+                       dev->tx_pkt_burst = iavf_xmit_pkts_check;
                } else {
                        dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
                }
@@ -4138,6 +4233,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
        if (no_poll_on_link_down) {
                adapter->tx_burst_type = tx_burst_type;
                dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+       } else if (mbuf_check) {
+               adapter->tx_burst_type = tx_burst_type;
+               dev->tx_pkt_burst = iavf_xmit_pkts_check;
        } else {
                dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
        }
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..90e7291928 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -297,6 +297,8 @@ struct iavf_tx_queue {
        uint16_t next_rs;              /* next to check DD,  for VPMD */
        uint16_t ipsec_crypto_pkt_md_offset;
 
+       uint64_t mbuf_errors;
+
        bool q_set;                    /* if rx queue has been configured */
        bool tx_deferred_start;        /* don't start this queue in dev start */
        const struct iavf_txq_ops *ops;
-- 
2.25.1
