Add test cases in testpmd to test VxLAN Tx Checksum offload, which include
 - IPv4 tunnels and IPv6 tunnels
 - outer L3, inner L3 and L4 checksum offload for the Tx side.

Signed-off-by: jijiangl <jijiang.liu at intel.com>
Acked-by: Helin Zhang <helin.zhang at intel.com>
Acked-by: Jingjing Wu <jingjing.wu at intel.com>
Acked-by: Jing Chen <jing.d.chen at intel.com>

---
 app/test-pmd/config.c   |    6 +-
 app/test-pmd/csumonly.c |  194 +++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 183 insertions(+), 17 deletions(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 606e34a..fe77b89 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1719,9 +1719,9 @@ tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
        uint16_t tx_ol_flags;
        if (port_id_is_invalid(port_id))
                return;
-       /* Clear last 4 bits and then set L3/4 checksum mask again */
-       tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
-       ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | 
tx_ol_flags);
+       /* Clear last 8 bits and then set L3/4 checksum mask again */
+       tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFF00);
+       ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xff) | 
tx_ol_flags);
 }

 void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index e5a1f52..cb2a483 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -196,7 +196,6 @@ get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, 
uint16_t *l4_hdr)
        return (uint16_t)cksum;
 }

-
 /*
  * Forwarding of packets. Change the checksum field with HW or SW methods
  * The HW/SW method selection depends on the ol_flags on every packet
@@ -209,10 +208,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
        struct rte_mbuf  *mb;
        struct ether_hdr *eth_hdr;
        struct ipv4_hdr  *ipv4_hdr;
+       struct ether_hdr *inner_eth_hdr;
+       struct ipv4_hdr  *inner_ipv4_hdr = NULL;
        struct ipv6_hdr  *ipv6_hdr;
+       struct ipv6_hdr  *inner_ipv6_hdr = NULL;
        struct udp_hdr   *udp_hdr;
+       struct udp_hdr   *inner_udp_hdr;
        struct tcp_hdr   *tcp_hdr;
+       struct tcp_hdr   *inner_tcp_hdr;
        struct sctp_hdr  *sctp_hdr;
+       struct sctp_hdr  *inner_sctp_hdr;

        uint16_t nb_rx;
        uint16_t nb_tx;
@@ -221,12 +226,18 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
        uint16_t pkt_ol_flags;
        uint16_t tx_ol_flags;
        uint16_t l4_proto;
+       uint16_t inner_l4_proto = 0;
        uint16_t eth_type;
        uint8_t  l2_len;
        uint8_t  l3_len;
+       uint8_t  inner_l2_len;
+       uint8_t  inner_l3_len = 0;

        uint32_t rx_bad_ip_csum;
        uint32_t rx_bad_l4_csum;
+       uint8_t  ipv4_tunnel;
+       uint8_t  ipv6_tunnel;
+       uint16_t ptype;

 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        uint64_t start_tsc;
@@ -261,8 +272,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                mb = pkts_burst[i];
                l2_len  = sizeof(struct ether_hdr);
                pkt_ol_flags = mb->ol_flags;
+               ptype = mb->reserved;
                ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));

+               ipv4_tunnel = IS_ETH_IPV4_TUNNEL(ptype);
+               ipv6_tunnel = IS_ETH_IPV6_TUNNEL(ptype);
+
                eth_hdr = (struct ether_hdr *) mb->pkt.data;
                eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
                if (eth_type == ETHER_TYPE_VLAN) {
@@ -295,7 +310,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                 *      + ipv4 or ipv6
                 *      + udp or tcp or sctp or others
                 */
-               if (pkt_ol_flags & PKT_RX_IPV4_HDR) {
+               if (pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) {

                        /* Do not support ipv4 option field */
                        l3_len = sizeof(struct ipv4_hdr) ;
@@ -325,17 +340,92 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                if (tx_ol_flags & 0x2) {
                                        /* HW Offload */
                                        ol_flags |= PKT_TX_UDP_CKSUM;
-                                       /* Pseudo header sum need be set 
properly */
-                                       udp_hdr->dgram_cksum = 
get_ipv4_psd_sum(ipv4_hdr);
+                                       if (ipv4_tunnel)
+                                               udp_hdr->dgram_cksum = 0;
+                                       else
+                                               /* Pseudo header sum need be 
set properly */
+                                               udp_hdr->dgram_cksum =
+                                                       
get_ipv4_psd_sum(ipv4_hdr);
                                }
                                else {
                                        /* SW Implementation, clear checksum 
field first */
                                        udp_hdr->dgram_cksum = 0;
                                        udp_hdr->dgram_cksum = 
get_ipv4_udptcp_checksum(ipv4_hdr,
-                                                       (uint16_t*)udp_hdr);
+                                                                       
(uint16_t *)udp_hdr);
                                }
-                       }
-                       else if (l4_proto == IPPROTO_TCP){
+
+                               if (ipv4_tunnel) {
+
+                                       uint16_t len;
+
+                                       /* Check if inner L3/L4 checkum flag is 
set */
+                                       if (tx_ol_flags & 0xF0)
+                                               ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+                                       inner_l2_len  = sizeof(struct 
ether_hdr);
+                                       inner_eth_hdr = (struct ether_hdr *) 
(rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + l2_len + l3_len
+                                                                + 
ETHER_VXLAN_HLEN);
+
+                                       eth_type = 
rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+                                       if (eth_type == ETHER_TYPE_VLAN) {
+                                               ol_flags |= PKT_TX_IVLAN_PKT;
+                                               inner_l2_len += sizeof(struct 
vlan_hdr);
+                                               eth_type = 
rte_be_to_cpu_16(*(uint16_t *)
+                                                       
((uintptr_t)&eth_hdr->ether_type +
+                                                               sizeof(struct 
vlan_hdr)));
+                                       }
+
+                                       len = l2_len + l3_len + 
ETHER_VXLAN_HLEN + inner_l2_len;
+                                       if (eth_type == ETHER_TYPE_IPv4) {
+                                               inner_l3_len = sizeof(struct 
ipv4_hdr);
+                                               inner_ipv4_hdr = (struct 
ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len);
+                                               inner_l4_proto = 
inner_ipv4_hdr->next_proto_id;
+
+                                               if (tx_ol_flags & 0x10)
+                                                       /* Do not delete, this 
is required by HW*/
+                                                       
inner_ipv4_hdr->hdr_checksum = 0;
+                                                       ol_flags |= 
PKT_TX_IPV4_CSUM;
+
+                                       } else if (eth_type == ETHER_TYPE_IPv6) 
{
+                                               inner_l3_len = sizeof(struct 
ipv6_hdr);
+                                               inner_ipv6_hdr = (struct 
ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len);
+                                               inner_l4_proto = 
inner_ipv6_hdr->proto;
+                                       }
+                                       if ((inner_l4_proto == IPPROTO_UDP) && 
(tx_ol_flags & 0x20)) {
+
+                                               /* HW Offload */
+                                               ol_flags |= PKT_TX_UDP_CKSUM;
+                                               inner_udp_hdr = (struct udp_hdr 
*) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len + inner_l3_len);
+                                               if (eth_type == ETHER_TYPE_IPv4)
+                                                       
inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+                                               else if (eth_type == 
ETHER_TYPE_IPv6)
+                                                       
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+                                       } else if ((inner_l4_proto == 
IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+                                               /* HW Offload */
+                                               ol_flags |= PKT_TX_TCP_CKSUM;
+                                               inner_tcp_hdr = (struct tcp_hdr 
*) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len + inner_l3_len);
+                                               if (eth_type == ETHER_TYPE_IPv4)
+                                                       inner_tcp_hdr->cksum = 
get_ipv4_psd_sum(inner_ipv4_hdr);
+                                               else if (eth_type == 
ETHER_TYPE_IPv6)
+                                                       inner_tcp_hdr->cksum = 
get_ipv6_psd_sum(inner_ipv6_hdr);
+                                       } else if ((inner_l4_proto == 
IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+                                               /* HW Offload */
+                                               ol_flags |= PKT_TX_SCTP_CKSUM;
+                                               inner_sctp_hdr = (struct 
sctp_hdr *) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len + inner_l3_len);
+                                               inner_sctp_hdr->cksum = 0;
+                                       }
+
+                                       mb->reserved = inner_l3_len;
+                               }
+
+                       } else if (l4_proto == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr*) 
(rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + 
l3_len);
                                if (tx_ol_flags & 0x4) {
@@ -347,8 +437,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                        tcp_hdr->cksum = 
get_ipv4_udptcp_checksum(ipv4_hdr,
                                                        (uint16_t*)tcp_hdr);
                                }
-                       }
-                       else if (l4_proto == IPPROTO_SCTP) {
+                       } else if (l4_proto == IPPROTO_SCTP) {
                                sctp_hdr = (struct sctp_hdr*) 
(rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + 
l3_len);

@@ -367,9 +456,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                }
                        }
                        /* End of L4 Handling*/
-               }
-               else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {
-
+               } else if (pkt_ol_flags & (PKT_RX_IPV6_HDR | 
PKT_RX_IPV6_HDR_EXT)) {
                        ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
                                        unsigned char *) + l2_len);
                        l3_len = sizeof(struct ipv6_hdr) ;
@@ -382,15 +469,94 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                if (tx_ol_flags & 0x2) {
                                        /* HW Offload */
                                        ol_flags |= PKT_TX_UDP_CKSUM;
-                                       udp_hdr->dgram_cksum = 
get_ipv6_psd_sum(ipv6_hdr);
+                                       if (ipv6_tunnel)
+                                               udp_hdr->dgram_cksum = 0;
+                                       else
+                                               udp_hdr->dgram_cksum =
+                                                       
get_ipv6_psd_sum(ipv6_hdr);
                                }
                                else {
                                        /* SW Implementation */
                                        /* checksum field need be clear first */
                                        udp_hdr->dgram_cksum = 0;
                                        udp_hdr->dgram_cksum = 
get_ipv6_udptcp_checksum(ipv6_hdr,
-                                                       (uint16_t*)udp_hdr);
+                                                               (uint16_t 
*)udp_hdr);
                                }
+
+                               if (ipv6_tunnel) {
+
+                                       uint16_t len;
+
+                                       /* Check if inner L3/L4 checksum flag 
is set */
+                                       if (tx_ol_flags & 0xF0)
+                                               ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+                                       inner_l2_len  = sizeof(struct 
ether_hdr);
+                                       inner_eth_hdr = (struct ether_hdr *) 
(rte_pktmbuf_mtod(mb,
+                                               unsigned char *) + l2_len + 
l3_len + ETHER_VXLAN_HLEN);
+                                       eth_type = 
rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+
+                                       if (eth_type == ETHER_TYPE_VLAN) {
+                                               ol_flags |= PKT_TX_IVLAN_PKT;
+                                               inner_l2_len += sizeof(struct 
vlan_hdr);
+                                               eth_type = 
rte_be_to_cpu_16(*(uint16_t *)
+                                                       
((uintptr_t)&eth_hdr->ether_type +
+                                                       sizeof(struct 
vlan_hdr)));
+                                       }
+
+                                       len = l2_len + l3_len + 
ETHER_VXLAN_HLEN + inner_l2_len;
+
+                                       if (eth_type == ETHER_TYPE_IPv4) {
+                                               inner_l3_len = sizeof(struct 
ipv4_hdr);
+                                               inner_ipv4_hdr = (struct 
ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len);
+                                               inner_l4_proto = 
inner_ipv4_hdr->next_proto_id;
+
+                                               /* HW offload */
+                                               if (tx_ol_flags & 0x10)
+                                                       /* Do not delete, this 
is required by HW*/
+                                                       
inner_ipv4_hdr->hdr_checksum = 0;
+                                                       ol_flags |= 
PKT_TX_IPV4_CSUM;
+                                       } else if (eth_type == ETHER_TYPE_IPv6) 
{
+                                               inner_l3_len = sizeof(struct 
ipv6_hdr);
+                                               inner_ipv6_hdr = (struct 
ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+                                                       unsigned char *) + len);
+                                               inner_l4_proto = 
inner_ipv6_hdr->proto;
+                                       }
+
+                                       if ((inner_l4_proto == IPPROTO_UDP) && 
(tx_ol_flags & 0x20)) {
+                                               inner_udp_hdr = (struct udp_hdr 
*) (rte_pktmbuf_mtod(mb,
+                                                       unsigned char *) + len 
+ inner_l3_len);
+                                               /* HW offload */
+                                               ol_flags |= PKT_TX_UDP_CKSUM;
+                                               inner_udp_hdr->dgram_cksum = 0;
+                                               if (eth_type == ETHER_TYPE_IPv4)
+                                                       
inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+                                               else if (eth_type == 
ETHER_TYPE_IPv6)
+                                                       
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+                                       } else if ((inner_l4_proto == 
IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+                                               /* HW offload */
+                                               ol_flags |= PKT_TX_TCP_CKSUM;
+                                               inner_tcp_hdr = (struct tcp_hdr 
*) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len + inner_l3_len);
+
+                                               if (eth_type == ETHER_TYPE_IPv4)
+                                                       inner_tcp_hdr->cksum = 
get_ipv4_psd_sum(inner_ipv4_hdr);
+                                               else if (eth_type == 
ETHER_TYPE_IPv6)
+                                                       inner_tcp_hdr->cksum = 
get_ipv6_psd_sum(inner_ipv6_hdr);
+
+                                       } else if ((inner_l4_proto == 
IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+                                               /* HW offload */
+                                               ol_flags |= PKT_TX_SCTP_CKSUM;
+                                               inner_sctp_hdr = (struct 
sctp_hdr *) (rte_pktmbuf_mtod(mb,
+                                                               unsigned char 
*) + len + inner_l3_len);
+                                               inner_sctp_hdr->cksum = 0;
+                                       }
+
+                                       /* pass the inner l3 length to driver */
+                                       mb->reserved = inner_l3_len;
+                               }
+
                        }
                        else if (l4_proto == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr*) 
(rte_pktmbuf_mtod(mb,
-- 
1.7.7.6

Reply via email to