From: Vanshika Shukla <vanshika.shu...@nxp.com>

TX confirmation provides dedicated confirmation
queues for transmitted packets. These queues are
used by software to get the transmit status and to
release the buffers of transmitted packets.

Signed-off-by: Vanshika Shukla <vanshika.shu...@nxp.com>
Acked-by: Hemant Agrawal <hemant.agra...@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 45 ++++++++++++++++++++++-------
 drivers/net/dpaa/dpaa_ethdev.h |  3 +-
 drivers/net/dpaa/dpaa_rxtx.c   | 52 ++++++++++++++++++++++++++++++++++
 drivers/net/dpaa/dpaa_rxtx.h   |  2 ++
 4 files changed, 90 insertions(+), 12 deletions(-)

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 90b34e42f2..9ffb8c578c 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1826,9 +1826,15 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
        opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
        opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
        opts.fqd.context_b = 0;
+#if defined(RTE_LIBRTE_IEEE1588)
+       opts.fqd.context_a.lo = 0;
+       opts.fqd.context_a.hi = fman_dealloc_bufs_mask_hi;
+#else
        /* no tx-confirmation */
-       opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
        opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+       opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+#endif
+
        if (fman_ip_rev >= FMAN_V3) {
                /* Set B0V bit in contextA to set ASPID to 0 */
                opts.fqd.context_a.hi |= 0x04000000;
@@ -1861,9 +1867,11 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
        return ret;
 }
 
-#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
-static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+#if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER) || defined(RTE_LIBRTE_IEEE1588)
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default) and DPAA TX CONFIRM queue
+ * to support PTP
+ */
+static int dpaa_def_queue_init(struct qman_fq *fq, uint32_t fqid)
 {
        struct qm_mcc_initfq opts = {0};
        int ret;
@@ -1872,15 +1880,15 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 
        ret = qman_reserve_fqid(fqid);
        if (ret) {
-               DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
+               DPAA_PMD_ERR("Reserve fqid %d failed with ret: %d",
                        fqid, ret);
                return -EINVAL;
        }
        /* "map" this Rx FQ to one of the interfaces Tx FQID */
-       DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
+       DPAA_PMD_DEBUG("Creating fq %p, fqid %d", fq, fqid);
        ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
        if (ret) {
-               DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
+               DPAA_PMD_ERR("create fqid %d failed with ret: %d",
                        fqid, ret);
                return ret;
        }
@@ -1888,7 +1896,7 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
        opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
        ret = qman_init_fq(fq, 0, &opts);
        if (ret)
-               DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
+               DPAA_PMD_ERR("init fqid %d failed with ret: %d",
                            fqid, ret);
        return ret;
 }
@@ -2079,6 +2087,14 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
                goto free_rx;
        }
 
+       dpaa_intf->tx_conf_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+               MAX_DPAA_CORES, MAX_CACHELINE);
+       if (!dpaa_intf->tx_conf_queues) {
+               DPAA_PMD_ERR("Failed to alloc mem for TX conf queues\n");
+               ret = -ENOMEM;
+               goto free_rx;
+       }
+
        /* If congestion control is enabled globally*/
        if (td_tx_threshold) {
                dpaa_intf->cgr_tx = rte_zmalloc(NULL,
@@ -2115,21 +2131,28 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        }
        dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
 
-#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-       ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+#if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER) || defined(RTE_LIBRTE_IEEE1588)
+       ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
                        [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
        if (ret) {
                DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
                goto free_tx;
        }
        dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
-       ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+       ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
                        [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
        if (ret) {
                DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
                goto free_tx;
        }
        dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+       ret = dpaa_def_queue_init(dpaa_intf->tx_conf_queues,
+                       fman_intf->fqid_tx_confirm);
+       if (ret) {
+               DPAA_PMD_ERR("DPAA TX CONFIRM queue init failed!");
+               goto free_tx;
+       }
+       dpaa_intf->tx_conf_queues->dpaa_intf = dpaa_intf;
 #endif
 
        DPAA_PMD_DEBUG("All frame queues created");
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 261a5a3ca7..6aced9d5e9 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017-2019 NXP
+ *   Copyright 2017-2021 NXP
  *
  */
 #ifndef __DPAA_ETHDEV_H__
@@ -131,6 +131,7 @@ struct dpaa_if {
        struct qman_fq *rx_queues;
        struct qman_cgr *cgr_rx;
        struct qman_fq *tx_queues;
+       struct qman_fq *tx_conf_queues;
        struct qman_cgr *cgr_tx;
        struct qman_fq debug_queues[2];
        uint16_t nb_rx_queues;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index c2579d65ee..189af748e9 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -1082,6 +1082,11 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
        struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
        uint32_t free_count = 0;
+#if defined(RTE_LIBRTE_IEEE1588)
+       struct qman_fq *fq = q;
+       struct dpaa_if *dpaa_intf = fq->dpaa_intf;
+       struct qman_fq *fq_txconf = dpaa_intf->tx_conf_queues;
+#endif
 
        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                ret = rte_dpaa_portal_init((void *)0);
@@ -1162,6 +1167,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                                mbuf = temp_mbuf;
                                realloc_mbuf = 0;
                        }
+
+#if defined(RTE_LIBRTE_IEEE1588)
+       fd_arr[loop].cmd |= DPAA_FD_CMD_FCO | qman_fq_fqid(fq_txconf);
+#endif
 indirect_buf:
                        state = tx_on_dpaa_pool(mbuf, bp_info,
                                                &fd_arr[loop],
@@ -1190,6 +1199,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                sent += frames_to_send;
        }
 
+#if defined(RTE_LIBRTE_IEEE1588)
+       dpaa_eth_tx_conf(fq_txconf);
+#endif
+
        DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
 
        for (loop = 0; loop < free_count; loop++) {
@@ -1200,6 +1213,45 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        return sent;
 }
 
+void
+dpaa_eth_tx_conf(void *q)
+{
+       struct qman_fq *fq = q;
+       struct qm_dqrr_entry *dq;
+       int num_tx_conf, ret, dq_num;
+       uint32_t vdqcr_flags = 0;
+
+       if (unlikely(rte_dpaa_bpid_info == NULL &&
+                               rte_eal_process_type() == RTE_PROC_SECONDARY))
+               rte_dpaa_bpid_info = fq->bp_array;
+
+       if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+               ret = rte_dpaa_portal_init((void *)0);
+               if (ret) {
+                       DPAA_PMD_ERR("Failure in affining portal");
+                       return;
+               }
+       }
+
+       num_tx_conf = DPAA_MAX_DEQUEUE_NUM_FRAMES - 2;
+
+       do {
+               dq_num = 0;
+               ret = qman_set_vdq(fq, num_tx_conf, vdqcr_flags);
+               if (ret)
+                       return;
+               do {
+                       dq = qman_dequeue(fq);
+                       if (!dq)
+                               continue;
+                       dq_num++;
+                       dpaa_display_frame_info(&dq->fd, fq->fqid, true);
+                       qman_dqrr_consume(fq, dq);
+                       dpaa_free_mbuf(&dq->fd);
+               } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+       } while (dq_num == num_tx_conf);
+}
+
 uint16_t
 dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index b2d7c0f2a3..042602e087 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -281,6 +281,8 @@ uint16_t dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                                uint16_t nb_bufs);
 uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
 
+void dpaa_eth_tx_conf(void *q);
+
 uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
                              struct rte_mbuf **bufs __rte_unused,
                              uint16_t nb_bufs __rte_unused);
-- 
2.25.1

Reply via email to