net/axgbe: add Tx TCP segmentation offload (TSO) support

Signed-off-by: Jesna K E <jesna....@amd.com>
---
 drivers/net/axgbe/axgbe_common.h       |  11 +
 drivers/net/axgbe/axgbe_dev.c          |  19 ++
 drivers/net/axgbe/axgbe_ethdev.c       |   1 +
 drivers/net/axgbe/axgbe_ethdev.h       |   1 +
 drivers/net/axgbe/axgbe_rxtx.c         | 305 +++++++++++++++----------
 drivers/net/axgbe/axgbe_rxtx_vec_sse.c |   1 +
 6 files changed, 223 insertions(+), 115 deletions(-)
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..1face6f361 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -162,6 +162,9 @@
 #define DMA_CH_SR 0x60

 /* DMA channel register entry bit positions and sizes */
+/* TSO: per-channel maximum segment size field */
+#define DMA_CH_CR_MSS_INDEX 0
+#define DMA_CH_CR_MSS_WIDTH 14
 #define DMA_CH_CR_PBLX8_INDEX 16
 #define DMA_CH_CR_PBLX8_WIDTH 1
 #define DMA_CH_CR_SPH_INDEX 24
@@ -1232,6 +1235,14 @@
 #define TX_CONTEXT_DESC3_VT_INDEX 0
 #define TX_CONTEXT_DESC3_VT_WIDTH 16

+/* TSO: Tx descriptor payload/header length and one-step timestamp fields */
+#define TX_NORMAL_DESC3_TPL_INDEX 0
+#define TX_NORMAL_DESC3_TPL_WIDTH 18
+#define TX_NORMAL_DESC3_THL_INDEX 19
+#define TX_NORMAL_DESC3_THL_WIDTH 4
+#define TX_CONTEXT_DESC3_OSTC_INDEX 27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH 1
+
 #define TX_NORMAL_DESC2_HL_B1L_INDEX 0
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
 #define TX_NORMAL_DESC2_IC_INDEX 31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..7e0d387fc3 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,24 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
 	return 0;
 }

+/* Enable TSO on every Tx DMA channel of the port.
+ *
+ * NOTE(review): MSS is programmed with a fixed value (800). It should be
+ * derived from mbuf->tso_segsz per packet (via a context descriptor) or at
+ * least be a named constant -- confirm against the hardware data sheet.
+ */
+static void axgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	struct axgbe_tx_queue *txq;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
+	}
+}
+
 static int axgbe_enable_rss(struct axgbe_port *pdata)
 {
 	int ret;
@@ -1314,6 +1332,7 @@ static int axgbe_init(struct axgbe_port *pdata)
 	axgbe_config_rx_pbl_val(pdata);
 	axgbe_config_rx_buffer_size(pdata);
 	axgbe_config_rss(pdata);
+	axgbe_config_tso_mode(pdata);
 	wrapper_tx_desc_init(pdata);
 	ret = wrapper_rx_desc_init(pdata);
 	if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index e1cb60c1c3..5aa8743a1a 100644
---
a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -1237,6 +1237,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

 	if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
 	unsigned int tx_osp_mode;
 	unsigned int tx_max_fifo_size;
 	unsigned int multi_segs_tx;
+	unsigned int tso_tx;

 	/* Rx settings */
 	unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 68aa67a3fa..6b5ea6d622 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -643,6 +643,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
 		pdata->multi_segs_tx = true;

+	if (dev_data->dev_conf.txmode.offloads &
+	    RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		pdata->tso_tx = true;
+
 	return 0;
 }

@@ -900,138 +903,208 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	return 0;
 }

 /* Tx Descriptor formation for segmented mbuf
  * Each mbuf will require multiple descriptors */

 static int axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
-
struct rte_mbuf *mbuf)
 {
 	volatile struct axgbe_tx_desc *desc;
 	uint16_t idx;
 	uint64_t mask;
 	int start_index;
 	uint32_t pkt_len = 0;
 	int nb_desc_free;
 	struct rte_mbuf *tx_pkt;
+	uint64_t total_hdr_len = 0;
+	int tso = 0;
+
+	/* TSO is a per-packet decision: the port must advertise the
+	 * offload AND the mbuf must request segmentation.
+	 */
+	if (txq->pdata->tso_tx &&
+	    (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+		tso = 1;
+		total_hdr_len = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+	}

 	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

-	if (mbuf->nb_segs > nb_desc_free) {
+	/* A TSO packet needs one extra descriptor: header and first-segment
+	 * payload are described separately.
+	 */
+	if (mbuf->nb_segs + tso > nb_desc_free) {
 		axgbe_xmit_cleanup_seg(txq);
 		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
-		if (unlikely(mbuf->nb_segs > nb_desc_free))
+		if (unlikely(mbuf->nb_segs + tso > nb_desc_free))
 			return RTE_ETH_TX_DESC_UNAVAIL;
 	}

 	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 	desc = &txq->desc[idx];
 	/* Saving the start index for setting the OWN bit finally */
 	start_index = idx;

 	tx_pkt = mbuf;
 	/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
 	pkt_len = tx_pkt->pkt_len;

 	/* Update buffer address and length */
 	desc->baddr = rte_mbuf_data_iova(tx_pkt);
-	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
-			tx_pkt->data_len);
-	/* Total msg length to transmit */
-	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
-			tx_pkt->pkt_len);
+	if (tso) {
+		/* First descriptor carries only the L2/L3/L4 header */
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+				total_hdr_len);
+		/* TSE + TCP payload length (TPL) and TCP header length.
+		 * NOTE(review): THL is in 32-bit words per the amd-xgbe
+		 * kernel driver, while l4_len is in bytes -- confirm the
+		 * /4 scaling against the data sheet.
+		 */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+				mbuf->pkt_len - total_hdr_len);
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+				mbuf->l4_len / 4);
+	} else {
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+				tx_pkt->data_len);
+		/* Total msg length to transmit */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+				tx_pkt->pkt_len);
+	}
 	/* Timestamp enablement check */
 	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
 		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);

 	rte_wmb();
 	/* Mark it as First Descriptor */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
 	/* Mark it as a NORMAL descriptor */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
 	/* configure h/w Offload */
 	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
 	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
 	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
 	rte_wmb();

 	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
 		/* Mark it as a CONTEXT descriptor */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
 				CTXT, 1);
 		/* Set the VLAN tag */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
 				VT, mbuf->vlan_tci);
 		/* Indicate this descriptor contains the VLAN tag */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
 				VLTV, 1);
 		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
 				TX_NORMAL_DESC2_VLAN_INSERT);
 	} else {
 		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
 	}
 	rte_wmb();

 	/* Save mbuf */
 	txq->sw_ring[idx] = tx_pkt;
 	/* Update current index*/
 	txq->cur++;

+	/* For TSO, the payload of the first segment (everything past the
+	 * header) gets its own descriptor.
+	 */
+	if (tso && tx_pkt->data_len > total_hdr_len) {
+		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+		desc = &txq->desc[idx];
+
+		desc->baddr = rte_pktmbuf_iova_offset(tx_pkt, total_hdr_len);
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+				tx_pkt->data_len - total_hdr_len);
+
+		rte_wmb();
+
+		/* Mark it as a NORMAL descriptor */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+		/* Set OWN bit */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+		rte_wmb();
+
+		/* The mbuf is owned by the first descriptor's ring slot;
+		 * keep this slot NULL so cleanup cannot double-free it.
+		 */
+		txq->sw_ring[idx] = NULL;
+		txq->cur++;
+	}
+
 	tx_pkt = tx_pkt->next;

 	while (tx_pkt != NULL) {
 		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
 		desc = &txq->desc[idx];

 		/* Update buffer address and length */
 		desc->baddr = rte_mbuf_data_iova(tx_pkt);

 		AXGMAC_SET_BITS_LE(desc->desc2,
 				TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);

 		rte_wmb();

 		/* Mark it as a NORMAL descriptor */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
 		/* configure h/w Offload */
 		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
 		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
 				mask == RTE_MBUF_F_TX_UDP_CKSUM)
 			AXGMAC_SET_BITS_LE(desc->desc3,
 					TX_NORMAL_DESC3, CIC, 0x3);
 		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
 			AXGMAC_SET_BITS_LE(desc->desc3,
 					TX_NORMAL_DESC3, CIC, 0x1);

 		rte_wmb();

 		/* Set OWN bit */
 		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 		rte_wmb();

 		/* Save mbuf */
 		txq->sw_ring[idx] = tx_pkt;
 		/* Update current index*/
 		txq->cur++;

 		tx_pkt = tx_pkt->next;
 	}

 	/* Set LD bit for the last descriptor */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
 	rte_wmb();

 	/* Update stats */
 	txq->bytes += pkt_len;

 	/* Set OWN bit for the first descriptor */
 	desc = &txq->desc[start_index];
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();

 	return 0;
 }

@@ -1077,6 +1150,8 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		idx * sizeof(struct axgbe_tx_desc));
 	/* Update tail reg with next immediate address to kick Tx DMA channel*/
 	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
+	txq->pkts += nb_pkt_sent;
 	return nb_pkt_sent;
 }
-- 
2.34.1