This patch adds support for TSO in the DQO RDA format.

Signed-off-by: Tathagat Priyadarshi <tathagat.d...@gmail.com>
Signed-off-by: Varun Lakkur Ambaji Rao <varun...@gmail.com>
---
 drivers/net/gve/gve_tx_dqo.c | 42 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 579b8d6..e5cdb57 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -72,6 +72,17 @@
        txq->complq_tail = next;
 }
 
+static inline void
+gve_tx_fill_seg_desc_dqo(volatile union gve_tx_desc_dqo *desc, struct rte_mbuf *tx_pkt)
+{
+       uint32_t hlen = tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len;
+       desc->tso_ctx.cmd_dtype.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO;
+       desc->tso_ctx.cmd_dtype.tso = 1;
+       desc->tso_ctx.mss = (uint16_t)tx_pkt->tso_segsz;
+       desc->tso_ctx.tso_total_len = tx_pkt->pkt_len - hlen;
+       desc->tso_ctx.header_len = (uint8_t)hlen;
+}
+
 uint16_t
 gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -89,6 +100,8 @@
        uint16_t sw_id;
        uint64_t bytes;
        uint16_t first_sw_id;
+       uint8_t tso;
+       uint8_t csum;
 
        sw_ring = txq->sw_ring;
        txr = txq->tx_ring;
@@ -108,12 +121,31 @@
                                gve_tx_clean_dqo(txq);
                }
 
-               if (txq->nb_free < tx_pkt->nb_segs)
-                       break;
-
                ol_flags = tx_pkt->ol_flags;
                nb_used = tx_pkt->nb_segs;
                first_sw_id = sw_id;
+
+               if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+                       tso = 1;
+                       csum = 1;
+               } else if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO) {
+                       tso = 0;
+                       csum = 1;
+               } else {
+                       tso = 0;
+                       csum = 0;
+               }
+
+               nb_used += tso;
+               if (txq->nb_free < nb_used)
+                       break;
+
+               if (tso) {
+                       txd = &txr[tx_id];
+                       gve_tx_fill_seg_desc_dqo(txd, tx_pkt);
+                       tx_id = (tx_id + 1) & mask;
+               }
+
                do {
                        if (sw_ring[sw_id] != NULL)
                        PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring");
@@ -127,6 +159,7 @@
                        txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
                        txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
                        txd->pkt.end_of_packet = 0;
+                       txd->pkt.checksum_offload_enable = csum;
 
                        /* size of desc_ring and sw_ring could be different */
                        tx_id = (tx_id + 1) & mask;
@@ -139,9 +172,6 @@
                /* fill the last descriptor with End of Packet (EOP) bit */
                txd->pkt.end_of_packet = 1;
 
-               if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO)
-                       txd->pkt.checksum_offload_enable = 1;
-
                txq->nb_free -= nb_used;
                txq->nb_used += nb_used;
        }
-- 
1.8.3.1

Reply via email to