Add Tx offloading support:
 - support TSO

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
Signed-off-by: Junfeng Guo <junfeng....@intel.com>
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |   4 +-
 drivers/net/idpf/idpf_rxtx.c      | 134 +++++++++++++++++++++++++++++-
 drivers/net/idpf/idpf_rxtx.h      |  22 +++++
 4 files changed, 158 insertions(+), 3 deletions(-)
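
Usage note (not part of the patch): a minimal sketch of how an application
would request TSO once this lands. The header sizes and the 1460-byte MSS
below are illustrative assumptions; the driver only relies on the mbuf
fields checked in idpf_prep_pkts() and consumed by idpf_set_splitq_tso_ctx().

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>
  #include <rte_ip.h>
  #include <rte_tcp.h>

  static void
  example_request_tso(struct rte_eth_conf *conf, struct rte_mbuf *m)
  {
          /* Device level: request the offload before rte_eth_dev_configure() */
          conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;

          /* Per packet: mark TSO and describe the headers so the PMD can
           * build the context descriptor and compute the payload length. */
          m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
          m->l2_len = sizeof(struct rte_ether_hdr);
          m->l3_len = sizeof(struct rte_ipv4_hdr);
          m->l4_len = sizeof(struct rte_tcp_hdr);
          m->tso_segsz = 1460; /* must stay within [IDPF_MIN_TSO_MSS, IDPF_MAX_TSO_MSS] */
  }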

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index c86d9378ea..47c686762d 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -9,6 +9,7 @@
 [Features]
 Queue start/stop     = Y
 MTU update           = Y
+TSO                  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Packet type parsing  = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d8cc423a23..21315866bf 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -102,7 +102,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM     |
                RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
-       dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+       dev_info->tx_offload_capa =
+               RTE_ETH_TX_OFFLOAD_TCP_TSO              |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 143c8b69f3..8f82cf1b59 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1549,6 +1549,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
        cq->tx_tail = next;
 }
 
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+       if ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+               return 1;
+
+       return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+                       union idpf_tx_offload tx_offload,
+                       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+       uint16_t cmd_dtype;
+       uint32_t tso_len;
+       uint8_t hdr_len;
+
+       if (tx_offload.l4_len == 0) {
+               PMD_TX_LOG(DEBUG, "L4 length set to 0");
+               return;
+       }
+
+       hdr_len = tx_offload.l2_len +
+               tx_offload.l3_len +
+               tx_offload.l4_len;
+       cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+               IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
+       tso_len = mbuf->pkt_len - hdr_len;
+
+       ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+       ctx_desc->tso.qw0.hdr_len = hdr_len;
+       ctx_desc->tso.qw0.mss_rt =
+               rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+                                IDPF_TXD_FLEX_CTX_MSS_RT_M);
+       ctx_desc->tso.qw0.flex_tlen =
+               rte_cpu_to_le_32(tso_len &
+                                IDPF_TXD_FLEX_CTX_MSS_RT_M);
+}
+
 uint16_t
 idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts)
@@ -1557,11 +1600,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        volatile struct idpf_flex_tx_sched_desc *txr;
        volatile struct idpf_flex_tx_sched_desc *txd;
        struct idpf_tx_entry *sw_ring;
+       union idpf_tx_offload tx_offload = {0};
        struct idpf_tx_entry *txe, *txn;
        uint16_t nb_used, tx_id, sw_id;
        struct rte_mbuf *tx_pkt;
        uint16_t nb_to_clean;
        uint16_t nb_tx = 0;
+       uint64_t ol_flags;
+       uint16_t nb_ctx;
 
        if (unlikely(txq == NULL) || unlikely(!txq->q_started))
                return nb_tx;
@@ -1591,7 +1637,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                if (txq->nb_free < tx_pkt->nb_segs)
                        break;
-               nb_used = tx_pkt->nb_segs;
+
+               ol_flags = tx_pkt->ol_flags;
+               tx_offload.l2_len = tx_pkt->l2_len;
+               tx_offload.l3_len = tx_pkt->l3_len;
+               tx_offload.l4_len = tx_pkt->l4_len;
+               tx_offload.tso_segsz = tx_pkt->tso_segsz;
+               /* Calculate the number of context descriptors needed. */
+               nb_ctx = idpf_calc_context_desc(ol_flags);
+               nb_used = tx_pkt->nb_segs + nb_ctx;
+
+               /* context descriptor */
+               if (nb_ctx != 0) {
+                       volatile union idpf_flex_tx_ctx_desc *ctx_desc =
+                       (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+                       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+                               idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                                                       ctx_desc);
+
+                       tx_id++;
+                       if (tx_id == txq->nb_tx_desc)
+                               tx_id = 0;
+               }
 
                do {
                        txd = &txr[tx_id];
@@ -1842,14 +1910,17 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
        volatile struct idpf_flex_tx_desc *txd;
        volatile struct idpf_flex_tx_desc *txr;
+       union idpf_tx_offload tx_offload = {0};
        struct idpf_tx_entry *txe, *txn;
        struct idpf_tx_entry *sw_ring;
        struct idpf_tx_queue *txq;
        struct rte_mbuf *tx_pkt;
        struct rte_mbuf *m_seg;
        uint64_t buf_dma_addr;
+       uint64_t ol_flags;
        uint16_t tx_last;
        uint16_t nb_used;
+       uint16_t nb_ctx;
        uint16_t td_cmd;
        uint16_t tx_id;
        uint16_t nb_tx;
@@ -1876,11 +1947,19 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                tx_pkt = *tx_pkts++;
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+               ol_flags = tx_pkt->ol_flags;
+               tx_offload.l2_len = tx_pkt->l2_len;
+               tx_offload.l3_len = tx_pkt->l3_len;
+               tx_offload.l4_len = tx_pkt->l4_len;
+               tx_offload.tso_segsz = tx_pkt->tso_segsz;
+               /* Calculate the number of context descriptors needed. */
+               nb_ctx = idpf_calc_context_desc(ol_flags);
+
                /* The number of descriptors that must be allocated for
                 * a packet equals to the number of the segments of that
                 * packet plus 1 context descriptor if needed.
                 */
-               nb_used = (uint16_t)(tx_pkt->nb_segs);
+               nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
                tx_last = (uint16_t)(tx_id + nb_used - 1);
 
                /* Circular ring */
@@ -1908,6 +1987,29 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        }
                }
 
+               if (nb_ctx != 0) {
+                       /* Setup TX context descriptor if required */
+                       volatile union idpf_flex_tx_ctx_desc *ctx_txd =
+                               (volatile union idpf_flex_tx_ctx_desc *)
+                                                       &txr[tx_id];
+
+                       txn = &sw_ring[txe->next_id];
+                       RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+                       if (txe->mbuf != NULL) {
+                               rte_pktmbuf_free_seg(txe->mbuf);
+                               txe->mbuf = NULL;
+                       }
+
+                       /* TSO enabled */
+                       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+                               idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                                                       ctx_txd);
+
+                       txe->last_id = tx_last;
+                       tx_id = txe->next_id;
+                       txe = txn;
+               }
+
                m_seg = tx_pkt;
                do {
                        txd = &txr[tx_id];
@@ -1968,16 +2070,44 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
               uint16_t nb_pkts)
 {
        int i, ret;
+       uint64_t ol_flags;
        struct rte_mbuf *m;
 
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
+               ol_flags = m->ol_flags;
+
+               /* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. */
+               if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
+                       if (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {
+                               rte_errno = EINVAL;
+                               return i;
+                       }
+               } else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||
+                          (m->tso_segsz > IDPF_MAX_TSO_MSS) ||
+                          (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {
+                       /* An MSS outside this range is considered malicious */
+                       rte_errno = EINVAL;
+                       return i;
+               }
+
+               if ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {
+                       rte_errno = ENOTSUP;
+                       return i;
+               }
 
                if (m->pkt_len < IDPF_MIN_FRAME_SIZE) {
                        rte_errno = EINVAL;
                        return i;
                }
 
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+#endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
                        rte_errno = -ret;
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index eb0b230d3a..efb2734d85 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -21,6 +21,16 @@
 #define IDPF_DEFAULT_TX_RS_THRESH      32
 #define IDPF_DEFAULT_TX_FREE_THRESH    32
 
+#define IDPF_MIN_TSO_MSS       88
+#define IDPF_MAX_TSO_MSS       9728
+#define IDPF_MAX_TSO_FRAME_SIZE        262143
+#define IDPF_TX_MAX_MTU_SEG     10
+
+#define IDPF_TX_OFFLOAD_MASK RTE_MBUF_F_TX_TCP_SEG
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+               (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
 #define IDPF_GET_PTYPE_SIZE(p) \
        (sizeof(struct virtchnl2_ptype) + \
 	(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
@@ -115,6 +125,18 @@ struct idpf_tx_queue {
        struct idpf_tx_queue *complq;
 };
 
+/* Offload features */
+union idpf_tx_offload {
+       uint64_t data;
+       struct {
+               uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+               uint64_t l3_len:9; /* L3 (IP) Header Length. */
+               uint64_t l4_len:8; /* L4 Header Length. */
+               uint64_t tso_segsz:16; /* TCP TSO segment size */
+               /* uint64_t unused : 24; */
+       };
+};
+
 struct idpf_rxq_ops {
        void (*release_mbufs)(struct idpf_rx_queue *rxq);
 };
-- 
2.34.1
