This commit adds support for generic tunnel TSO and checksum offload. The
PMD computes the inner/outer header offsets from the mbuf fields; the
hardware then performs the checksum and TSO calculations based on those
offsets and types.
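
As an illustration (this sketch is not part of the patch; the helper name
is hypothetical and the field layout follows the standard DPDK mbuf
convention for UDP tunnels), an application would request the offload
roughly as follows for a VXLAN-encapsulated TCP packet:

  #include <rte_mbuf.h>
  #include <rte_ether.h>
  #include <rte_ip.h>
  #include <rte_udp.h>
  #include <rte_tcp.h>

  /* Hypothetical helper: mark an mbuf for generic tunnel TSO and checksum
   * offload. The PMD derives the inner/outer header offsets written to
   * the WQE from these fields.
   */
  static void
  setup_tunnel_tso(struct rte_mbuf *m)
  {
          m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IPV4 |
                         PKT_TX_OUTER_IP_CKSUM | PKT_TX_IPV4 |
                         PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
                         PKT_TX_TCP_SEG;
          m->outer_l2_len = sizeof(struct ether_hdr);
          m->outer_l3_len = sizeof(struct ipv4_hdr);
          /* For UDP tunnels, l2_len spans outer L4 + tunnel header +
           * inner L2.
           */
          m->l2_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
                      sizeof(struct ether_hdr);
          m->l3_len = sizeof(struct ipv4_hdr);
          m->l4_len = sizeof(struct tcp_hdr);
          m->tso_segsz = 1400; /* Example MSS. */
  }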

Signed-off-by: Xueming Li <xuemi...@mellanox.com>
---
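Note for reviewers: both new lookup tables are indexed by the Tx ol_flags
bits shifted down to the low bits, plus a tunnel (or outer UDP) bit at
bit 9. A worked example for mlx5_cksum_table, assuming the PKT_TX_* bit
layout of the current rte_mbuf.h (PKT_TX_TCP_SEG = bit 50, PKT_TX_L4_MASK
= bits 52:53, PKT_TX_IP_CKSUM = bit 54, PKT_TX_OUTER_IP_CKSUM = bit 58):

  /* Tunneled packet requesting outer IP, inner IP and inner TCP checksums:
   * idx = ((ol_flags & mask) >> 50) | (is_tunnel << 9)
   *     = (1 << 2)   PKT_TX_TCP_CKSUM >> 50
   *     | (1 << 4)   PKT_TX_IP_CKSUM >> 50
   *     | (1 << 8)   PKT_TX_OUTER_IP_CKSUM >> 50
   *     | (1 << 9)   is_tunnel
   *     = 0x314
   * mlx5_cksum_table[0x314] = MLX5_ETH_WQE_L3_CSUM |
   *                           MLX5_ETH_WQE_L3_INNER_CSUM |
   *                           MLX5_ETH_WQE_L4_INNER_CSUM;
   */
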
 doc/guides/nics/mlx5.rst              |   8 +++
 drivers/net/mlx5/Makefile             |   5 ++
 drivers/net/mlx5/mlx5.c               |  30 +++++++--
 drivers/net/mlx5/mlx5.h               |   1 +
 drivers/net/mlx5/mlx5_ethdev.c        |   3 +-
 drivers/net/mlx5/mlx5_prm.h           |  24 +++++++
 drivers/net/mlx5/mlx5_rxtx.c          | 117 +++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx.h          | 104 +++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx_vec.c      |   9 +--
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |   2 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |   2 +-
 drivers/net/mlx5/mlx5_txq.c           |   4 ++
 12 files changed, 248 insertions(+), 61 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 2e6d1e45a..184302ee5 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -307,6 +307,14 @@ Run-time configuration
 
   Enabled by default.
 
+- ``swp`` parameter [int]
+
+  A nonzero value enables the TX SW parser to support generic tunnel TSO
+  and checksum offloading. Please refer to
+  ``DEV_TX_OFFLOAD_GENERIC_TNL_CKSUM_TSO`` for detailed information.
+
+  Disabled by default.
+
 Prerequisites
 -------------
 
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a3984eb9f..254cb93c8 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -118,6 +118,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
                enum IBV_WQ_FLAG_RX_END_PADDING \
                $(AUTOCONF_OUTPUT)
        $Q sh -- '$<' '$@' \
+               HAVE_IBV_MLX5_MOD_SWP \
+               infiniband/mlx5dv.h \
+               enum MLX5DV_CONTEXT_MASK_SWP \
+               $(AUTOCONF_OUTPUT)
+       $Q sh -- '$<' '$@' \
                HAVE_IBV_MLX5_MOD_MPW \
                infiniband/mlx5dv.h \
                enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5a959f7c5..3b126d45a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -93,6 +93,9 @@
 /* Device parameter to enable hardware Rx vector. */
 #define MLX5_RX_VEC_EN "rx_vec_en"
 
+/* Device parameter to control Tx SW parser. */
+#define MLX5_TX_SWP "swp"
+
 #ifndef HAVE_IBV_MLX5_MOD_MPW
 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -421,6 +424,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                config->tx_vec_en = !!tmp;
        } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
                config->rx_vec_en = !!tmp;
+       } else if (strcmp(MLX5_TX_SWP, key) == 0) {
+               config->swp = !!tmp;
        } else {
                WARN("%s: unknown parameter", key);
                return -EINVAL;
@@ -451,6 +456,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
                MLX5_TXQ_MAX_INLINE_LEN,
                MLX5_TX_VEC_EN,
                MLX5_RX_VEC_EN,
+               MLX5_TX_SWP,
                NULL,
        };
        struct rte_kvargs *kvlist;
@@ -606,9 +612,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
        unsigned int mps;
        unsigned int cqe_comp;
        unsigned int tunnel_en = 0;
+       unsigned int swp = 0;
        int idx;
        int i;
-       struct mlx5dv_context attrs_out;
+       struct mlx5dv_context attrs_out = {0};
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
        struct ibv_counter_set_description cs_desc;
 #endif
@@ -695,10 +702,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
        ibv_dev = list[i];
 
        DEBUG("device opened");
-       /*
-        * Multi-packet send is supported by ConnectX-4 Lx PF as well
-        * as all ConnectX-5 devices.
-        */
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
        mlx5dv_query_device(attr_ctx, &attrs_out);
        if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
                if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
@@ -712,6 +718,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                DEBUG("MPW isn't supported");
                mps = MLX5_MPW_DISABLED;
        }
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+               swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
+       DEBUG("SWP support: %u", swp);
+#endif
        if (RTE_CACHE_LINE_SIZE == 128 &&
            !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
                cqe_comp = 0;
@@ -928,6 +939,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                err = priv_uar_init_primary(priv);
                if (err)
                        goto port_error;
+               if (config.swp && !swp) {
+                       WARN("Tx SWP isn't supported");
+                       config.swp = 0;
+               }
+               INFO("SWP is %s", config.swp ? "enabled" : "disabled");
                /* Configure the first MAC address by default. */
                if (priv_get_mac(priv, &mac.addr_bytes)) {
                        ERROR("cannot get MAC address, is mlx5_en loaded?"
@@ -1080,8 +1096,10 @@ RTE_INIT(rte_mlx5_pmd_init);
 static void
 rte_mlx5_pmd_init(void)
 {
-       /* Build the static table for ptype conversion. */
+       /* Build the static tables for Verbs conversion. */
        mlx5_set_ptype_table();
+       mlx5_set_cksum_table();
+       mlx5_set_swp_types_table();
        /*
         * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
         * huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7851cac96..db88ee840 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -116,6 +116,7 @@ struct mlx5_dev_config {
        unsigned int tx_vec_en:1; /* Tx vector is enabled. */
        unsigned int rx_vec_en:1; /* Rx vector is enabled. */
        unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+       unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
        unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
        unsigned int ind_table_max_size; /* Maximum indirection table size. */
        int txq_inline; /* Maximum packet size for inlining. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 83657f509..bef54c562 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1492,7 +1492,8 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
        uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
        int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
                                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                   DEV_TX_OFFLOAD_GRE_TNL_TSO));
+                                   DEV_TX_OFFLOAD_GRE_TNL_TSO |
+                                   DEV_TX_OFFLOAD_GENERIC_TNL_CKSUM_TSO));
        int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
 
        assert(priv != NULL);
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 2de310bcb..ba62699f9 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -135,6 +135,30 @@
 /* Inner L4 checksum offload (Tunneled packets only). */
 #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
 
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
 /* Is flow mark valid. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index dc6691d1c..3d2462a67 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -75,6 +75,9 @@ uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
        [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
 
+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
 /**
  * Build a table to translate Rx completion flags to packet type.
  *
@@ -191,6 +194,74 @@ mlx5_set_ptype_table(void)
 }
 
 /**
+ * Build a table to translate packet checksum offloads to Verbs checksum flags.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+       unsigned int i;
+       uint8_t v;
+
+       /*
+        * The index should have:
+        * bit[0] = PKT_TX_TCP_SEG
+        * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+        * bit[4] = PKT_TX_IP_CKSUM
+        * bit[8] = PKT_TX_OUTER_IP_CKSUM
+        * bit[9] = tunnel
+        */
+       for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+               v = 0;
+               if (i & (1 << 9)) {
+                       /* Tunneled packet. */
+                       if (i & (1 << 8)) /* Outer IP. */
+                               v |= MLX5_ETH_WQE_L3_CSUM;
+                       if (i & (1 << 4)) /* Inner IP. */
+                               v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+                       if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+                               v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+               } else {
+                       /* No tunnel. */
+                       if (i & (1 << 4)) /* IP. */
+                               v |= MLX5_ETH_WQE_L3_CSUM;
+                       if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+                               v |= MLX5_ETH_WQE_L4_CSUM;
+               }
+               mlx5_cksum_table[i] = v;
+       }
+}
+
+/**
+ * Build a table to translate mbuf packet types to Verbs SWP types.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+       unsigned int i;
+       uint8_t v;
+
+       /*
+        * The index should have:
+        * bit[0:1] = PKT_TX_UDP_CKSUM
+        * bit[4] = PKT_TX_IPV6
+        * bit[8] = PKT_TX_OUTER_IPV6
+        * bit[9] = Outer UDP
+        */
+       for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+               v = 0;
+               if (i & (1 << 8)) /* Outer IPv6. */
+                       v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+               if (i & (1 << 9)) /* Outer UDP. */
+                       v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+               if (i & (1 << 4)) /* Inner IPv6. */
+                       v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+               if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52)) /* Inner UDP. */
+                       v |= MLX5_ETH_WQE_L4_INNER_UDP;
+               mlx5_swp_types_table[i] = v;
+       }
+}
+
+/**
  * Return the size of tailroom of WQ.
  *
  * @param txq
@@ -255,7 +326,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 static int
 inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
           uint32_t *length,
-          uint8_t *cs_flags,
           uintptr_t *addr,
           uint16_t *pkt_inline_sz,
           uint8_t **raw,
@@ -278,19 +348,15 @@ inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
                txq->stats.oerrors++;
                return -EINVAL;
        }
-       if (tunneled) {
+       if (tunneled)
                *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
-               *cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-       } else {
-               *cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-       }
-       if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+       /* First seg must contain all TSO headers. */
+       if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER ||
+                    *tso_header_sz > DATA_LEN(buf))) {
                txq->stats.oerrors++;
                return -EINVAL;
        }
        copy_b = *tso_header_sz - *pkt_inline_sz;
-       /* First seg must contain all TSO headers. */
-       assert(copy_b <= *length);
        if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
                return -EAGAIN;
        n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
@@ -423,7 +489,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (unlikely(!max_wqe))
                return 0;
        do {
-               struct rte_mbuf *buf = NULL;
+               struct rte_mbuf *buf = *pkts; /* First segment. */
                uint8_t *raw;
                volatile struct mlx5_wqe_v *wqe = NULL;
                volatile rte_v128u32_t *dseg = NULL;
@@ -435,15 +501,16 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uint16_t tso_header_sz = 0;
                uint16_t ehdr;
                uint8_t cs_flags;
-               uint64_t tso = 0;
+               uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+               uint8_t is_vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+               uint32_t swp_offsets = 0;
+               uint8_t swp_types = 0;
                uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
                int ret;
 
-               /* first_seg */
-               buf = *pkts;
                segs_n = buf->nb_segs;
                /*
                 * Make sure there is enough room to store this packet and
@@ -478,10 +545,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (pkts_n - i > 1)
                        rte_prefetch0(
                            rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
+               txq_mbuf_to_swp(txq, buf, tso, is_vlan,
+                               (uint8_t *)&swp_offsets, &swp_types);
                raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
                /* Replace the Ethernet type by the VLAN if necessary. */
-               if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+               if (is_vlan) {
                        uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
                                                         buf->vlan_tci);
                        unsigned int len = 2 * ETHER_ADDR_LEN - 2;
@@ -504,9 +573,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        addr += pkt_inline_sz;
                }
                raw += MLX5_WQE_DWORD_SIZE;
-               tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
                if (tso) {
-                       ret = inline_tso(txq, buf, &length, &cs_flags,
+                       ret = inline_tso(txq, buf, &length,
                                         &addr, &pkt_inline_sz,
                                         &raw, &max_wqe,
                                         &tso_segsz, &tso_header_sz);
@@ -683,8 +751,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                0,
                        };
                        wqe->eseg = (rte_v128u32_t){
-                               0,
-                               cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+                               swp_offsets,
+                               cs_flags | (swp_types << 8) |
+                               (rte_cpu_to_be_16(tso_segsz) << 16),
                                0,
                                (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
                        };
@@ -697,8 +766,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                0,
                        };
                        wqe->eseg = (rte_v128u32_t){
-                               0,
-                               cs_flags,
+                               swp_offsets,
+                               cs_flags | (swp_types << 8),
                                0,
                                (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
                        };
@@ -870,7 +939,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                }
                max_elts -= segs_n;
                --pkts_n;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                assert(length);
@@ -1102,7 +1171,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 * iteration.
                 */
                max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if packet differs. */
@@ -1380,7 +1449,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                /* Make sure there is enough room to store this packet. */
                if (max_elts - j == 0)
                        break;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if:
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d4738b14c..7040c9ec8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -195,6 +195,7 @@ struct mlx5_txq_data {
        uint16_t tso_en:1; /* When set hardware TSO is enabled. */
        uint16_t tunnel_en:1;
        /* When set TX offload for tunneled packets are supported. */
+       uint16_t swp_en:1; /* Whether SW parser is enabled. */
        uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
        uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
        uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
@@ -301,8 +302,12 @@ uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
 /* mlx5_rxtx.c */
 
 extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
 
 void mlx5_set_ptype_table(void);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
 uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
@@ -627,38 +632,93 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 }
 
 /**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP header offsets and types.
  *
- * @param txq_data
+ * @param txq
  *   Pointer to the Tx queue.
  * @param buf
  *   Pointer to the mbuf.
+ * @param tso
+ *   TSO offloads enabled.
+ * @param vlan
+ *   VLAN offloads enabled.
+ * @param offsets
+ *   Pointer to the SWP header offsets.
+ * @param swp_types
+ *   Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+                uint8_t tso, uint64_t vlan,
+                uint8_t *offsets, uint8_t *swp_types)
+{
+       uint8_t tunnel = (buf->ol_flags & PKT_TX_TUNNEL_MASK) ||
+                         buf->outer_l2_len;
+       uint16_t idx;
+       uint16_t off;
+       int outer_udp;
+       const uint64_t ol_flags_mask = PKT_TX_UDP_CKSUM | PKT_TX_IPV6 |
+                                      PKT_TX_OUTER_IPV6;
+
+       if (!txq->swp_en || !tunnel)
+               return;
+       outer_udp = (buf->ol_flags & PKT_TX_TUNNEL_MASK) == PKT_TX_TUNNEL_VXLAN;
+       /*
+        * The index should have:
+        * bit[0:1] = PKT_TX_UDP_CKSUM
+        * bit[4] = PKT_TX_IPV6
+        * bit[8] = PKT_TX_OUTER_IPV6
+        * bit[9] = Outer UDP
+        */
+       idx = ((buf->ol_flags & ol_flags_mask) >> 52) |
+             (outer_udp << 9);
+       *swp_types = mlx5_swp_types_table[idx];
+       /* SWP header offsets. */
+       off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
+       if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
+               offsets[1] = off >> 1;
+       off += buf->outer_l3_len; /* Outer L4 offset. */
+       if (outer_udp)
+               offsets[0] = off >> 1;
+       off += buf->l2_len; /* Inner L3 offset. */
+       if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
+               offsets[3] = off >> 1;
+       off += buf->l3_len; /* Inner L4 offset. */
+       if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+           ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
+               offsets[2] = off >> 1;
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ *   Pointer to the mbuf.
  *
  * @return
- *   the converted cs_flags.
+ *   Converted checksum flags.
  */
 static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
 {
-       uint8_t cs_flags = 0;
-
-       /* Should we enable HW CKSUM offload */
-       if (buf->ol_flags &
-           (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
-            PKT_TX_OUTER_IP_CKSUM)) {
-               if (txq_data->tunnel_en &&
-                   (buf->ol_flags &
-                    (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
-                       cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                                  MLX5_ETH_WQE_L4_INNER_CSUM;
-                       if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                               cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-               } else {
-                       cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                                  MLX5_ETH_WQE_L4_CSUM;
-               }
-       }
-       return cs_flags;
+       uint32_t idx;
+       uint32_t is_tunnel;
+       const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+                                      PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+       is_tunnel = (buf->ol_flags & (PKT_TX_TUNNEL_MASK |
+                    PKT_TX_OUTER_IP_CKSUM)) || buf->outer_l2_len;
+       /*
+        * The index should have:
+        * bit[0] = PKT_TX_TCP_SEG
+        * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+        * bit[4] = PKT_TX_IP_CKSUM
+        * bit[8] = PKT_TX_OUTER_IP_CKSUM
+        * bit[9] = tunnel
+        */
+       idx = ((buf->ol_flags & ol_flags_mask) >> 50) |
+             (is_tunnel << 9);
+       return mlx5_cksum_table[idx];
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 142c786d5..a1f3d662a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -70,8 +70,6 @@
 /**
  * Count the number of packets having same ol_flags and calculate cs_flags.
  *
- * @param txq
- *   Pointer to TX queue structure.
  * @param pkts
  *   Pointer to array of packets.
  * @param pkts_n
@@ -83,8 +81,7 @@
  *   Number of packets having same ol_flags.
  */
 static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-                uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
 {
        unsigned int pos;
        const uint64_t ol_mask =
@@ -98,7 +95,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
        for (pos = 1; pos < pkts_n; ++pos)
                if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
                        break;
-       *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
+       *cs_flags = txq_ol_cksum_to_cs(pkts[0]);
        return pos;
 }
 
@@ -169,7 +166,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                        n = txq_count_contig_single_seg(&pkts[nb_tx], n);
                if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
-                       n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+                       n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
                ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
                nb_tx += ret;
                if (!ret)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index e11565f69..261da11a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -170,7 +170,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                        break;
                wqe = &((volatile struct mlx5_wqe64 *)
                         txq->wqes)[wqe_ci & wq_mask].hdr;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Title WQEBB pointer. */
                t_wqe = (uint8x16_t *)wqe;
                dseg = (uint8_t *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 559b0237e..c55f5e36a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -172,7 +172,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                }
                wqe = &((volatile struct mlx5_wqe64 *)
                         txq->wqes)[wqe_ci & wq_mask].hdr;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Title WQEBB pointer. */
                t_wqe = (__m128i *)wqe;
                dseg = (__m128i *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 65086d36f..c4cd61aa6 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -143,6 +143,8 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv)
                if (config->tso)
                        offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+               if (config->swp)
+                       offloads |= DEV_TX_OFFLOAD_GENERIC_TNL_CKSUM_TSO;
        }
        return offloads;
 }
@@ -754,6 +756,8 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                txq_ctrl->txq.tso_en = 1;
        }
        txq_ctrl->txq.tunnel_en = config->tunnel_en;
+       txq_ctrl->txq.swp_en = (DEV_TX_OFFLOAD_GENERIC_TNL_CKSUM_TSO &
+                               txq_ctrl->txq.offloads) && config->swp;
 }
 
 /**
-- 
2.13.3
