Enable Tx checksum offload whenever any L4 checksum flag is set in the mbuf. Signed-off-by: Junfeng Guo <junfeng....@intel.com> Signed-off-by: Rushil Gupta <rush...@google.com> Signed-off-by: Jordan Kimbrough <jr...@google.com> Signed-off-by: Jeroen de Borst <jeroe...@google.com> --- drivers/net/gve/gve_ethdev.h | 4 ++++ drivers/net/gve/gve_tx_dqo.c | 5 +++++ 2 files changed, 9 insertions(+)
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index a8e0dd5f3d..bca6e86ef0 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -38,6 +38,10 @@ #define GVE_MAX_MTU RTE_ETHER_MTU #define GVE_MIN_MTU RTE_ETHER_MIN_MTU +#define GVE_TX_CKSUM_OFFLOAD_MASK ( \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG) + /* A list of pages registered with the device during setup and used by a queue * as buffers */ diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c index 450cf71a6b..e925d6c3d0 100644 --- a/drivers/net/gve/gve_tx_dqo.c +++ b/drivers/net/gve/gve_tx_dqo.c @@ -77,6 +77,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t mask, sw_mask; uint16_t nb_to_clean; uint16_t nb_tx = 0; + uint64_t ol_flags; uint16_t nb_used; uint16_t tx_id; uint16_t sw_id; @@ -103,6 +104,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) if (txq->nb_free < tx_pkt->nb_segs) break; + ol_flags = tx_pkt->ol_flags; nb_used = tx_pkt->nb_segs; do { @@ -127,6 +129,9 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* fill the last descriptor with End of Packet (EOP) bit */ txd->pkt.end_of_packet = 1; + if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK) + txd->pkt.checksum_offload_enable = 1; + txq->nb_free -= nb_used; txq->nb_used += nb_used; } -- 2.34.1