When an inner offload (L3/L4 checksum or L4 segmentation) is requested and
the hardware does not support recomputing the outer UDP checksum,
automatically disable it (i.e. set it to 0) in the common helper.

Signed-off-by: David Marchand <david.march...@redhat.com>
---
Changes since v2:
- fixed GRE tunneling,
- dropped documentation update,
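
For context, a minimal sketch of the behaviour this enables, in the style of
a driver tx_prepare path. The mbuf flags and rte_net_intel_cksum_prepare()
are the existing DPDK API; the wrapper function name and the assumption that
the mbuf carries a fully built VXLAN frame (header lengths and tso_segsz
already set) are illustrative only:

    #include <rte_mbuf.h>
    #include <rte_net.h>

    /* Hypothetical helper: m is assumed to be a complete outer IPv4/UDP +
     * inner IPv4/TCP VXLAN packet with l2/l3/l4 and outer_l2/l3 lengths set.
     */
    static int
    prepare_tunnel_tso(struct rte_mbuf *m)
    {
            /* Inner TSO requested without RTE_MBUF_F_TX_OUTER_UDP_CKSUM,
             * i.e. the hardware cannot recompute the outer UDP checksum
             * once the packet is segmented.
             */
            m->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
                    RTE_MBUF_F_TX_TUNNEL_VXLAN | RTE_MBUF_F_TX_IPV4 |
                    RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_SEG;

            /* The common helper now zeroes the outer UDP checksum itself,
             * so per-driver fixups such as hns3_outer_header_cksum_prepare()
             * (removed below) are no longer needed.
             */
            return rte_net_intel_cksum_prepare(m);
    }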

---
 app/test-pmd/csumonly.c      | 10 ++------
 drivers/net/hns3/hns3_rxtx.c | 44 ------------------------------------
 lib/net/rte_net.h            | 22 +++++++++++++-----
 3 files changed, 18 insertions(+), 58 deletions(-)

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 71add6ca47..2246c22e8e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -612,19 +612,13 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
                return ol_flags;
        }
 
-       /* outer UDP checksum is done in software. In the other side, for
-        * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
-        * set to zero.
+       /* Outer UDP checksum is done in software.
         *
         * If a packet will be TSOed into small packets by NIC, we cannot
         * set/calculate a non-zero checksum, because it will be a wrong
         * value after the packet be split into several small packets.
         */
-       if (tso_enabled)
-               udp_hdr->dgram_cksum = 0;
-
-       /* do not recalculate udp cksum if it was 0 */
-       if (udp_hdr->dgram_cksum != 0) {
+       if (!tso_enabled && udp_hdr->dgram_cksum != 0) {
                udp_hdr->dgram_cksum = 0;
                udp_hdr->dgram_cksum = get_udptcp_checksum(m, outer_l3_hdr,
                                        info->outer_l2_len + info->outer_l3_len,
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 03fc919fd7..b5436c51e7 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3616,47 +3616,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
        return false;
 }
 
-static void
-hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
-{
-       uint64_t ol_flags = m->ol_flags;
-       uint32_t paylen, hdr_len, l4_proto;
-       struct rte_udp_hdr *udp_hdr;
-
-       if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) &&
-                       ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
-                       !(ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
-               return;
-
-       if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
-               struct rte_ipv4_hdr *ipv4_hdr;
-
-               ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                       m->outer_l2_len);
-               l4_proto = ipv4_hdr->next_proto_id;
-       } else {
-               struct rte_ipv6_hdr *ipv6_hdr;
-
-               ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-                                          m->outer_l2_len);
-               l4_proto = ipv6_hdr->proto;
-       }
-
-       if (l4_proto != IPPROTO_UDP)
-               return;
-
-       /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
-       hdr_len = m->l2_len + m->l3_len + m->l4_len;
-       hdr_len += m->outer_l2_len + m->outer_l3_len;
-       paylen = m->pkt_len - hdr_len;
-       if (paylen <= m->tso_segsz)
-               return;
-       udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-                                         m->outer_l2_len +
-                                         m->outer_l3_len);
-       udp_hdr->dgram_cksum = 0;
-}
-
 static int
 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
 {
@@ -3834,7 +3793,6 @@ hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
                         * checksum of packets that need TSO, so network driver
                         * software not need to recalculate it.
                         */
-                       hns3_outer_header_cksum_prepare(m);
                        return 0;
                }
        }
@@ -3848,8 +3806,6 @@ hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
        if (!hns3_validate_tunnel_cksum(tx_queue, m))
                return 0;
 
-       hns3_outer_header_cksum_prepare(m);
-
        return 0;
 }
 
diff --git a/lib/net/rte_net.h b/lib/net/rte_net.h
index efd9d5f5ee..cdc6cf956d 100644
--- a/lib/net/rte_net.h
+++ b/lib/net/rte_net.h
@@ -108,6 +108,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 static inline int
 rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 {
+       const uint64_t inner_requests = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
+               RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG;
+       const uint64_t outer_requests = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+               RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
        /* Initialise ipv4_hdr to avoid false positive compiler warnings. */
        struct rte_ipv4_hdr *ipv4_hdr = NULL;
        struct rte_ipv6_hdr *ipv6_hdr;
@@ -120,9 +124,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
         * Mainly it is required to avoid fragmented headers check if
         * no offloads are requested.
         */
-       if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG |
-                                       RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
-                                       RTE_MBUF_F_TX_OUTER_UDP_CKSUM)))
+       if (!(ol_flags & (inner_requests | outer_requests)))
                return 0;
 
        if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
@@ -136,19 +138,27 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
                                        struct rte_ipv4_hdr *, m->outer_l2_len);
                        ipv4_hdr->hdr_checksum = 0;
                }
-               if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+               if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM || ol_flags & inner_requests) {
                        if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
                                ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
                                        m->outer_l2_len);
                                udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
                                        m->outer_l3_len);
-                               udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);
+                               if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
+                                       udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
+                                               m->ol_flags);
+                               else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+                                       udp_hdr->dgram_cksum = 0;
                        } else {
                                ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
                                        m->outer_l2_len);
                                udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
                                         m->outer_l2_len + m->outer_l3_len);
-                               udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, m->ol_flags);
+                               if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
+                                       udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
+                                               m->ol_flags);
+                               else if (ipv6_hdr->proto == IPPROTO_UDP)
+                                       udp_hdr->dgram_cksum = 0;
                        }
                }
        }
-- 
2.44.0
