From: Brick Yang <brick.y...@nxp.com> Check whether free enqueue descriptors exist before enqueuing a Tx packet. Also try to reclaim in-flight enqueue descriptors in case none are currently available.
Fixes: ed1cdbed6a15 ("net/dpaa2: support multiple Tx queues enqueue for ordered") Cc: sta...@dpdk.org Signed-off-by: Brick Yang <brick.y...@nxp.com> Signed-off-by: Rohit Raj <rohit....@nxp.com> Acked-by: Hemant Agrawal <hemant.agra...@nxp.com> --- drivers/event/dpaa2/dpaa2_eventdev.c | 8 ++--- drivers/net/dpaa2/dpaa2_rxtx.c | 50 +++++++++++++++++++--------- 2 files changed, 38 insertions(+), 20 deletions(-) diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c index f499d0d015..fa1a1ade80 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/drivers/event/dpaa2/dpaa2_eventdev.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017,2019-2021 NXP + * Copyright 2017,2019-2022 NXP */ #include <assert.h> @@ -176,7 +176,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], if (retry_count > DPAA2_EV_TX_RETRY_COUNT) { num_tx += loop; nb_events -= loop; - return num_tx + loop; + return num_tx; } } else { loop += ret; @@ -1016,9 +1016,7 @@ dpaa2_eventdev_txa_enqueue(void *port, txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid]; } - dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events); - - return nb_events; + return dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events); } static struct eventdev_ops dpaa2_eventdev_ops = { diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 9436a95ac8..571ea6d16d 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -1525,7 +1525,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, uint32_t loop, retry_count; int32_t ret; struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; - uint32_t frames_to_send; + uint32_t frames_to_send, num_free_eq_desc = 0; struct rte_mempool *mp; struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS]; @@ -1547,16 +1547,44 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, } swp = DPAA2_PER_LCORE_PORTAL; - for (loop = 0; loop < nb_pkts; loop++) { + 
frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? + dpaa2_eqcr_size : nb_pkts; + + for (loop = 0; loop < frames_to_send; loop++) { dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop]; eth_data = dpaa2_q[loop]->eth_data; priv = eth_data->dev_private; + if (!priv->en_loose_ordered) { + if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) { + if (!num_free_eq_desc) { + num_free_eq_desc = dpaa2_free_eq_descriptors(); + if (!num_free_eq_desc) + goto send_frames; + } + num_free_eq_desc--; + } + } + + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", + eth_data, dpaa2_q[loop]->fqid); + + /* Check if the queue is congested */ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) { + retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto send_frames; + } + + /* Prepare enqueue descriptor */ qbman_eq_desc_clear(&eqdesc[loop]); + if (*dpaa2_seqn(*bufs) && priv->en_ordered) { order_sendq = (struct dpaa2_queue *)priv->tx_vq[0]; dpaa2_set_enqueue_descriptor(order_sendq, - (*bufs), - &eqdesc[loop]); + (*bufs), + &eqdesc[loop]); } else { qbman_eq_desc_set_no_orp(&eqdesc[loop], DPAA2_EQ_RESP_ERR_FQ); @@ -1564,14 +1592,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, dpaa2_q[loop]->fqid); } - retry_count = 0; - while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) { - retry_count++; - /* Retry for some time before giving up */ - if (retry_count > CONG_RETRY_COUNT) - goto send_frames; - } - if (likely(RTE_MBUF_DIRECT(*bufs))) { mp = (*bufs)->pool; /* Check the basic scenario and set @@ -1591,7 +1611,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, &fd_arr[loop], mempool_to_bpid(mp)); bufs++; - dpaa2_q[loop]++; continue; } } else { @@ -1637,18 +1656,19 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, } bufs++; - dpaa2_q[loop]++; } send_frames: frames_to_send = loop; loop = 0; + retry_count = 0; while (loop < frames_to_send) { ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop], &fd_arr[loop], frames_to_send - loop); if 
(likely(ret > 0)) { loop += ret; + retry_count = 0; } else { retry_count++; if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) @@ -1834,7 +1854,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) retry_count = 0; while (i < loop) { ret = qbman_swp_enqueue_multiple_desc(swp, - &eqdesc[loop], &fd_arr[i], loop - i); + &eqdesc[i], &fd_arr[i], loop - i); if (unlikely(ret < 0)) { retry_count++; if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) -- 2.25.1