From: Xiaoyun Wang <cloud.wangxiao...@huawei.com>

This patch supports inner L3 checksum offload for VXLAN packets. When the application does not set the outer checksum offload flags, the driver now parses the outer L2/L3 headers itself to locate the inner headers.

Signed-off-by: Ziyang Xuan <xuanziya...@huawei.com>
---
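Note for reviewers: below is a minimal sketch (not part of this patch) of the application-side mbuf setup that the new no-outer-flags path expects. The helper name and the header lengths are hypothetical examples; the PKT_TX_* flags and mbuf length fields are the standard ones this driver already consumes.

#include <rte_mbuf.h>

/* Hypothetical example: inner checksum offload for a VXLAN packet
 * whose outer checksum flags are left unset, so the driver parses
 * the outer headers in hinic_analyze_tx_info(). Lengths assume an
 * untagged IPv4 outer header and an inner IPv4/TCP packet, both
 * without options.
 */
static void example_vxlan_tx_setup(struct rte_mbuf *m)
{
	/* l2_len spans everything up to the inner L3 header:
	 * outer Eth (14) + outer IPv4 (20) + UDP (8) + VXLAN (8) +
	 * inner Eth (14) = 64 bytes.
	 */
	m->l2_len = 64;
	m->l3_len = 20;		/* inner IPv4 header */
	m->l4_len = 20;		/* inner TCP header */
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}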
 drivers/net/hinic/hinic_pmd_tx.c | 192 ++++++++++++++++++++++++---------------
 1 file changed, 121 insertions(+), 71 deletions(-)
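Also for reviewers, a worked example (hypothetical packet, not part of this patch) of the length arithmetic in the new no-outer-flags branch of hinic_tx_offload_pkt_prepare():

/* Packet layout (bytes):
 *   outer Eth 14 | outer IPv4 20 | UDP 8 | VXLAN 8 |
 *   inner Eth 14 | inner IPv4 20 | TCP 20 | payload
 *
 * The application sets l2_len = 14 + 20 + 8 + 8 + 14 = 64,
 * l3_len = 20, l4_len = 20 and no outer_* lengths, so
 * hinic_analyze_tx_info() parses outer_l2_len = 14 and
 * outer_l3_len = 20 from the frame itself, giving:
 *
 *   inner_l2_len   = 64 - 8 - 8 - 14 - 20 = 14
 *   tunnel_length  = 64 - 14 - 20         = 30  (UDP + VXLAN + inner Eth)
 *   payload_offset = 64 + 20 + 20         = 104 (well under MAX_PLD_OFFSET)
 */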

diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 0ef7add..155ea61 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -20,6 +20,9 @@
 #include "hinic_pmd_tx.h"
 
 /* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN              14
+#define ETHER_LEN_WITH_VLAN            18
+#define HEADER_LEN_OFFSET              2
 #define VXLANLEN                       8
 #define MAX_PLD_OFFSET                 221
 #define MAX_SINGLE_SGE_SIZE            65536
@@ -34,6 +37,9 @@
 #define HINIC_TSO_PKT_MAX_SGE                  127     /* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)         ((num) > HINIC_TSO_PKT_MAX_SGE)
 
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET       1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET    0
+
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)  (SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -476,16 +482,16 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
 hinic_set_l4_csum_info(struct hinic_sq_task *task,
                u32 *queue_info, struct hinic_tx_offload_info *poff_info)
 {
-       u32 tcp_udp_cs, sctp;
+       u32 tcp_udp_cs, sctp = 0;
        u16 l2hdr_len;
 
-       sctp = 0;
        if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
                sctp = 1;
 
        tcp_udp_cs = poff_info->inner_l4_tcp_udp;
 
-       if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+       if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+           poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
                l2hdr_len =  poff_info->outer_l2_len;
 
                task->pkt_info2 |=
@@ -665,50 +671,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
        return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
 }
 
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
-       uint64_t ol_flags = m->ol_flags;
-       uint64_t inner_l3_offset = m->l2_len;
-
-       /* just support vxlan offload */
-       if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-           !(ol_flags & PKT_TX_TUNNEL_VXLAN))
-               return -ENOTSUP;
-
-       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-               inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
-       /* Headers are fragmented */
-       if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-               return -ENOTSUP;
-
-       /* IP checksum can be counted only for IPv4 packet */
-       if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
-               return -EINVAL;
-
-       /* IP type not set when required */
-       if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
-               if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
-                       return -EINVAL;
-       }
-
-       /* Check requirements for TSO packet */
-       if (ol_flags & PKT_TX_TCP_SEG) {
-               if (m->tso_segsz == 0 ||
-                       ((ol_flags & PKT_TX_IPV4) &&
-                       !(ol_flags & PKT_TX_IP_CKSUM)))
-                       return -EINVAL;
-       }
-
-       /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
-       if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
-               !(ol_flags & PKT_TX_OUTER_IPV4))
-               return -EINVAL;
-
-       return 0;
-}
-
 static inline uint16_t
 hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
 {
@@ -760,6 +722,67 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
        return __rte_raw_cksum_reduce(sum);
 }
 
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+                    int outer_cs_flag)
+{
+       uint64_t ol_flags = m->ol_flags;
+
+       if (outer_cs_flag == HINIC_TX_OUTER_CHECKSUM_FLAG_SET) {
+               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+                       off_info->payload_offset = m->outer_l2_len +
+                               m->outer_l3_len + m->l2_len + m->l3_len;
+               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+                               (ol_flags & PKT_TX_TCP_SEG)) {
+                       off_info->payload_offset = m->outer_l2_len +
+                                       m->outer_l3_len + m->l2_len +
+                                       m->l3_len + m->l4_len;
+               }
+       } else {
+               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+                       off_info->payload_offset = m->l2_len + m->l3_len;
+               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+                       (ol_flags & PKT_TX_TCP_SEG)) {
+                       off_info->payload_offset = m->l2_len + m->l3_len +
+                                                  m->l4_len;
+               }
+       }
+}
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+                     struct hinic_tx_offload_info *off_info)
+{
+       struct rte_ether_hdr *eth_hdr;
+       struct rte_vlan_hdr *vlan_hdr;
+       struct rte_ipv4_hdr *ip4h;
+       u16 ethertype, l3_proto;
+       u8 *hdr;
+
+       hdr = rte_pktmbuf_mtod(mbuf, u8 *);
+       eth_hdr = (struct rte_ether_hdr *)hdr;
+       ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+       if (ethertype == RTE_ETHER_TYPE_VLAN) {
+               off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+               vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+               ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+       } else {
+               off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+       }
+
+       l3_proto = ethertype;
+
+       if (l3_proto == RTE_ETHER_TYPE_IPV4) {
+               ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+               off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+                                       HEADER_LEN_OFFSET;
+       } else if (l3_proto == RTE_ETHER_TYPE_IPV6) {
+               /* ipv6 extension headers are not supported */
+               off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+       }
+}
+
 static inline int
 hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
                                struct hinic_tx_offload_info *off_info)
@@ -771,42 +794,66 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
        struct rte_ether_hdr *eth_hdr;
        struct rte_vlan_hdr *vlan_hdr;
        u16 eth_type = 0;
-       uint64_t inner_l3_offset = m->l2_len;
+       uint64_t inner_l3_offset;
        uint64_t ol_flags = m->ol_flags;
 
-       /* Does packet set any of available offloads */
+       /* Check if the packet sets any of the available offloads */
        if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
                return 0;
 
-       if (unlikely(hinic_validate_tx_offload(m)))
+       /* only vxlan tunnel offload is supported */
+       if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+           !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+               return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+       if (rte_validate_tx_offload(m) != 0)
                return -EINVAL;
+#endif
 
-       if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-                       (ol_flags & PKT_TX_OUTER_IPV6) ||
-                       (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
-               inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-               off_info->outer_l2_len = m->outer_l2_len;
-               off_info->outer_l3_len = m->outer_l3_len;
-               /* just support vxlan tunneling pkt */
-               off_info->inner_l2_len = m->l2_len - VXLANLEN -
-                                               sizeof(struct rte_udp_hdr);
-               off_info->inner_l3_len = m->l3_len;
-               off_info->inner_l4_len = m->l4_len;
-               off_info->tunnel_length = m->l2_len;
-               off_info->payload_offset = m->outer_l2_len +
-                               m->outer_l3_len + m->l2_len + m->l3_len;
-               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+       if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+               if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+                   (ol_flags & PKT_TX_OUTER_IPV6)) {
+                       inner_l3_offset = m->l2_len + m->outer_l2_len +
+                               m->outer_l3_len;
+                       off_info->outer_l2_len = m->outer_l2_len;
+                       off_info->outer_l3_len = m->outer_l3_len;
+                       /* only vxlan tunneling packets are supported */
+                       off_info->inner_l2_len = m->l2_len - VXLANLEN -
+                               sizeof(*udp_hdr);
+                       off_info->inner_l3_len = m->l3_len;
+                       off_info->inner_l4_len = m->l4_len;
+                       off_info->tunnel_length = m->l2_len;
+                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+                       hinic_get_pld_offset(m, off_info,
+                                            HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+               } else {
+                       inner_l3_offset = m->l2_len;
+                       hinic_analyze_tx_info(m, off_info);
+                       /* only vxlan tunneling packets are supported */
+                       off_info->inner_l2_len = m->l2_len - VXLANLEN -
+                               sizeof(*udp_hdr) - off_info->outer_l2_len -
+                               off_info->outer_l3_len;
+                       off_info->inner_l3_len = m->l3_len;
+                       off_info->inner_l4_len = m->l4_len;
+                       off_info->tunnel_length = m->l2_len -
+                               off_info->outer_l2_len - off_info->outer_l3_len;
+                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+                       hinic_get_pld_offset(m, off_info,
+                               HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+               }
        } else {
+               inner_l3_offset = m->l2_len;
                off_info->inner_l2_len = m->l2_len;
                off_info->inner_l3_len = m->l3_len;
                off_info->inner_l4_len = m->l4_len;
                off_info->tunnel_type = NOT_TUNNEL;
-               off_info->payload_offset = m->l2_len + m->l3_len;
-       }
 
-       if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
-           ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
-               off_info->payload_offset += m->l4_len;
+               hinic_get_pld_offset(m, off_info,
+                                    HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+       }
 
        /* invalid udp or tcp header */
        if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
@@ -855,6 +902,10 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
                        udp_hdr->dgram_cksum =
                                hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
                }
+       } else if (ol_flags & PKT_TX_OUTER_IPV4) {
+               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+               off_info->inner_l4_tcp_udp = 1;
+               off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
        }
 
        if (ol_flags & PKT_TX_IPV4)
@@ -892,7 +943,6 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
                off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
                off_info->inner_l4_tcp_udp = 1;
-               off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
        } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
                        (ol_flags & PKT_TX_TCP_SEG)) {
                if (ol_flags & PKT_TX_IPV4) {
-- 
1.8.3.1
