Define routines to handle Tx queue related ops.
This patch adds support for the rte_eth_dev_tx_queue*
APIs in this PMD.
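
A minimal sketch of how an application exercises these ops through the
generic ethdev API (the helper name, port/queue ids and descriptor count
below are illustrative, not taken from this patch):

    #include <rte_ethdev.h>

    static int setup_and_start_txq(uint16_t port_id, uint16_t qid)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_txconf txconf;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret < 0)
                    return ret;

            /* Start from the driver's default Tx config and defer the
             * queue start so rte_eth_dev_tx_queue_start() is exercised.
             */
            txconf = dev_info.default_txconf;
            txconf.tx_deferred_start = 1;

            /* Maps to qdma_dev_tx_queue_setup() in this PMD */
            ret = rte_eth_tx_queue_setup(port_id, qid, 1024,
                            rte_eth_dev_socket_id(port_id), &txconf);
            if (ret < 0)
                    return ret;

            ret = rte_eth_dev_start(port_id);
            if (ret < 0)
                    return ret;

            /* Maps to qdma_dev_tx_queue_start() in this PMD */
            return rte_eth_dev_tx_queue_start(port_id, qid);
    }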

Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
---
 drivers/net/qdma/qdma.h        |   8 +
 drivers/net/qdma/qdma_common.c |  74 +++++++++
 drivers/net/qdma/qdma_devops.c | 270 +++++++++++++++++++++++++++++++--
 3 files changed, 343 insertions(+), 9 deletions(-)

diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index 5992473b33..8515ebe60e 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -42,6 +42,7 @@
 #define MIN_RX_PIDX_UPDATE_THRESHOLD (1)
 #define MIN_TX_PIDX_UPDATE_THRESHOLD (1)
 #define DEFAULT_MM_CMPT_CNT_THRESHOLD  (2)
+#define QDMA_TXQ_PIDX_UPDATE_INTERVAL  (1000) /* 100 uSec */
 
 #define WB_TIMEOUT             (100000)
 #define RESET_TIMEOUT          (60000)
@@ -198,6 +199,7 @@ struct qdma_tx_queue {
        uint16_t                        tx_desc_pend;
        uint16_t                        nb_tx_desc; /* No of TX descriptors. */
        rte_spinlock_t                  pidx_update_lock;
+       struct qdma_q_pidx_reg_info     q_pidx_info;
        uint64_t                        offloads; /* Tx offloads */
 
        uint8_t                         st_mode:1;/* dma-mode: MM or ST */
@@ -297,17 +299,23 @@ struct qdma_pci_dev {
 };
 
 void qdma_dev_ops_init(struct rte_eth_dev *dev);
+void qdma_txq_pidx_update(void *arg);
 int qdma_pf_csr_read(struct rte_eth_dev *dev);
 
 uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len);
 
 int qdma_init_rx_queue(struct qdma_rx_queue *rxq);
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq);
 void qdma_reset_rx_queue(struct qdma_rx_queue *rxq);
 
 void qdma_clr_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
                                uint32_t mode);
 void qdma_inv_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
                                uint32_t mode);
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+                               uint32_t mode);
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+                               uint32_t mode);
 int qdma_identify_bars(struct rte_eth_dev *dev);
 int qdma_get_hw_version(struct rte_eth_dev *dev);
 
diff --git a/drivers/net/qdma/qdma_common.c b/drivers/net/qdma/qdma_common.c
index d39e642008..2650438e47 100644
--- a/drivers/net/qdma/qdma_common.c
+++ b/drivers/net/qdma/qdma_common.c
@@ -160,6 +160,80 @@ int qdma_init_rx_queue(struct qdma_rx_queue *rxq)
        return -ENOMEM;
 }
 
+/*
+ * Tx queue reset
+ */
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq)
+{
+       uint32_t i;
+       uint32_t sz;
+
+       txq->tx_fl_tail = 0;
+       if (txq->st_mode) {  /* ST-mode */
+               sz = sizeof(struct qdma_ul_st_h2c_desc);
+               /* Zero out HW ring memory */
+               for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+                       ((volatile char *)txq->tx_ring)[i] = 0;
+       } else {
+               sz = sizeof(struct qdma_ul_mm_desc);
+               /* Zero out HW ring memory */
+               for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+                       ((volatile char *)txq->tx_ring)[i] = 0;
+       }
+
+       /* Initialize SW ring entries */
+       for (i = 0; i < txq->nb_tx_desc; i++)
+               txq->sw_ring[i] = NULL;
+}
+
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev,
+                            uint32_t qid, uint32_t mode)
+{
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_descq_sw_ctxt q_sw_ctxt;
+       struct qdma_descq_hw_ctxt q_hw_ctxt;
+       struct qdma_descq_credit_ctxt q_credit_ctxt;
+       struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+       hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+                       QDMA_HW_ACCESS_INVALIDATE);
+       hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+                       QDMA_HW_ACCESS_INVALIDATE);
+
+       if (mode) {  /* ST-mode */
+               hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+                       &q_credit_ctxt, QDMA_HW_ACCESS_INVALIDATE);
+       }
+}
+
+/**
+ * Clear Tx queue contexts
+ *
+ * @param dev   Pointer to Ethernet device structure.
+ * @param qid   Absolute queue id (including the queue base).
+ * @param mode  Queue mode: ST or MM.
+ *
+ * @return Nothing.
+ */
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev,
+                            uint32_t qid, uint32_t mode)
+{
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_descq_sw_ctxt q_sw_ctxt;
+       struct qdma_descq_credit_ctxt q_credit_ctxt;
+       struct qdma_descq_hw_ctxt q_hw_ctxt;
+       struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+       hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+                       QDMA_HW_ACCESS_CLEAR);
+       hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+                       QDMA_HW_ACCESS_CLEAR);
+       if (mode) {  /* ST-mode */
+               hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+                       &q_credit_ctxt, QDMA_HW_ACCESS_CLEAR);
+       }
+}
+
 /* Utility function to find index of an element in an array */
 int index_of_array(uint32_t *arr, uint32_t n, uint32_t element)
 {
diff --git a/drivers/net/qdma/qdma_devops.c b/drivers/net/qdma/qdma_devops.c
index fefbbda012..e411c0f1be 100644
--- a/drivers/net/qdma/qdma_devops.c
+++ b/drivers/net/qdma/qdma_devops.c
@@ -573,13 +573,196 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                            uint16_t nb_tx_desc, unsigned int socket_id,
                            const struct rte_eth_txconf *tx_conf)
 {
-       (void)dev;
-       (void)tx_queue_id;
-       (void)nb_tx_desc;
-       (void)socket_id;
-       (void)tx_conf;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_tx_queue *txq = NULL;
+       struct qdma_ul_mm_desc *tx_ring_mm;
+       struct qdma_ul_st_h2c_desc *tx_ring_st;
+       uint32_t sz;
+       uint8_t  *tx_ring_bypass;
+       int err = 0;
+
+       PMD_DRV_LOG(INFO, "Configuring Tx queue id:%d with %d desc\n",
+                   tx_queue_id, nb_tx_desc);
+
+       if (!qdma_dev->is_vf) {
+               err = qdma_dev_increment_active_queue
+                               (qdma_dev->dma_device_index,
+                               qdma_dev->func_id,
+                               QDMA_DEV_Q_TYPE_H2C);
+               if (err != QDMA_SUCCESS)
+                       return -EINVAL;
+       }
+       if (!qdma_dev->init_q_range) {
+               if (!qdma_dev->is_vf) {
+                       err = qdma_pf_csr_read(dev);
+                       if (err < 0) {
+                               PMD_DRV_LOG(ERR, "CSR read failed\n");
+                               goto tx_setup_err;
+                       }
+               }
+               qdma_dev->init_q_range = 1;
+       }
+       /* Allocate Tx queue data structure */
+       txq = rte_zmalloc_socket("QDMA_TxQ", sizeof(struct qdma_tx_queue),
+                                               RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               PMD_DRV_LOG(ERR, "Memory allocation failed for "
+                               "Tx queue SW structure\n");
+               err = -ENOMEM;
+               goto tx_setup_err;
+       }
+
+       txq->st_mode = qdma_dev->q_info[tx_queue_id].queue_mode;
+       txq->en_bypass = (qdma_dev->q_info[tx_queue_id].tx_bypass_mode) ? 1 : 0;
+       txq->bypass_desc_sz = qdma_dev->q_info[tx_queue_id].tx_bypass_desc_sz;
+
+       txq->nb_tx_desc = (nb_tx_desc + 1);
+       txq->queue_id = tx_queue_id;
+       txq->dev = dev;
+       txq->port_id = dev->data->port_id;
+       txq->func_id = qdma_dev->func_id;
+       txq->num_queues = dev->data->nb_tx_queues;
+       txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+       txq->ringszidx = index_of_array(qdma_dev->g_ring_sz,
+                                       QDMA_NUM_RING_SIZES, txq->nb_tx_desc);
+       if (txq->ringszidx < 0) {
+               PMD_DRV_LOG(ERR, "Expected Ring size %d not found\n",
+                               txq->nb_tx_desc);
+               err = -EINVAL;
+               goto tx_setup_err;
+       }
+
+       if (qdma_dev->ip_type == EQDMA_SOFT_IP &&
+                       qdma_dev->vivado_rel >= QDMA_VIVADO_2020_2) {
+               if (qdma_dev->dev_cap.desc_eng_mode ==
+                               QDMA_DESC_ENG_BYPASS_ONLY) {
+                       PMD_DRV_LOG(ERR,
+                               "Bypass only mode design "
+                               "is not supported\n");
+                       return -ENOTSUP;
+               }
+
+               if (txq->en_bypass &&
+                               qdma_dev->dev_cap.desc_eng_mode ==
+                               QDMA_DESC_ENG_INTERNAL_ONLY) {
+                       PMD_DRV_LOG(ERR,
+                               "Tx qid %d config in bypass "
+                               "mode not supported on "
+                               "internal only mode design\n",
+                               tx_queue_id);
+                       return -ENOTSUP;
+               }
+       }
+
+       /* Allocate memory for TX descriptor ring */
+       if (txq->st_mode) {
+               if (!qdma_dev->dev_cap.st_en) {
+                       PMD_DRV_LOG(ERR, "Streaming mode not enabled "
+                                       "in the hardware\n");
+                       err = -EINVAL;
+                       goto tx_setup_err;
+               }
+
+               if (txq->en_bypass &&
+                       txq->bypass_desc_sz != 0)
+                       sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+               else
+                       sz = (txq->nb_tx_desc) *
+                                       sizeof(struct qdma_ul_st_h2c_desc);
+               txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id, sz,
+                                               socket_id);
+               if (!txq->tx_mz) {
+                       PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+                                       "ST H2C ring of size %d\n", sz);
+                       err = -ENOMEM;
+                       goto tx_setup_err;
+               }
+
+               txq->tx_ring = txq->tx_mz->addr;
+               tx_ring_st = (struct qdma_ul_st_h2c_desc *)txq->tx_ring;
+
+               tx_ring_bypass = (uint8_t *)txq->tx_ring;
+               /* Write-back status structure */
+               if (txq->en_bypass &&
+                       txq->bypass_desc_sz != 0)
+                       txq->wb_status = (struct wb_status *)&
+                                       tx_ring_bypass[(txq->nb_tx_desc - 1) *
+                                       (txq->bypass_desc_sz)];
+               else
+                       txq->wb_status = (struct wb_status *)&
+                                       tx_ring_st[txq->nb_tx_desc - 1];
+       } else {
+               if (!qdma_dev->dev_cap.mm_en) {
+                       PMD_DRV_LOG(ERR, "Memory mapped mode not "
+                                       "enabled in the hardware\n");
+                       err = -EINVAL;
+                       goto tx_setup_err;
+               }
+
+               if (txq->en_bypass &&
+                       txq->bypass_desc_sz != 0)
+                       sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+               else
+                       sz = (txq->nb_tx_desc) * sizeof(struct qdma_ul_mm_desc);
+               txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id,
+                                               sz, socket_id);
+               if (!txq->tx_mz) {
+                       PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+                                       "MM H2C ring of size %d\n", sz);
+                       err = -ENOMEM;
+                       goto tx_setup_err;
+               }
+
+               txq->tx_ring = txq->tx_mz->addr;
+               tx_ring_mm = (struct qdma_ul_mm_desc *)txq->tx_ring;
+
+               /* Write-back status structure */
+
+               tx_ring_bypass = (uint8_t *)txq->tx_ring;
+               if (txq->en_bypass &&
+                       txq->bypass_desc_sz != 0)
+                       txq->wb_status = (struct wb_status *)&
+                               tx_ring_bypass[(txq->nb_tx_desc - 1) *
+                               (txq->bypass_desc_sz)];
+               else
+                       txq->wb_status = (struct wb_status *)&
+                               tx_ring_mm[txq->nb_tx_desc - 1];
+       }
+
+       PMD_DRV_LOG(INFO, "Tx ring phys addr: 0x%lX, Tx Ring virt addr: 0x%lX",
+           (uint64_t)txq->tx_mz->iova, (uint64_t)txq->tx_ring);
+
+       /* Allocate memory for TX software ring */
+       sz = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
+       txq->sw_ring = rte_zmalloc_socket("TxSwRn", sz,
+                               RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq->sw_ring == NULL) {
+               PMD_DRV_LOG(ERR, "Memory allocation failed for "
+                                "Tx queue SW ring\n");
+               err = -ENOMEM;
+               goto tx_setup_err;
+       }
+
+       rte_spinlock_init(&txq->pidx_update_lock);
+       dev->data->tx_queues[tx_queue_id] = txq;
 
        return 0;
+
+tx_setup_err:
+       PMD_DRV_LOG(ERR, " Tx queue setup failed");
+       if (!qdma_dev->is_vf)
+               qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+                                               qdma_dev->func_id,
+                                               QDMA_DEV_Q_TYPE_H2C);
+       if (txq) {
+               if (txq->tx_mz)
+                       rte_memzone_free(txq->tx_mz);
+               if (txq->sw_ring)
+                       rte_free(txq->sw_ring);
+               rte_free(txq);
+       }
+       return err;
 }
 
 void qdma_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_id)
@@ -983,9 +1166,54 @@ int qdma_dev_configure(struct rte_eth_dev *dev)
 
 int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
 {
-       (void)dev;
-       (void)qid;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_tx_queue *txq;
+       uint32_t queue_base =  qdma_dev->queue_base;
+       int err, bypass_desc_sz_idx;
+       struct qdma_descq_sw_ctxt q_sw_ctxt;
+       struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+       txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
 
+       memset(&q_sw_ctxt, 0, sizeof(struct qdma_descq_sw_ctxt));
+
+       bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz);
+
+       qdma_reset_tx_queue(txq);
+       qdma_clr_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+       if (txq->st_mode) {
+               q_sw_ctxt.desc_sz = SW_DESC_CNTXT_H2C_STREAM_DMA;
+       } else {
+               q_sw_ctxt.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
+               q_sw_ctxt.is_mm = 1;
+       }
+       q_sw_ctxt.wbi_chk = 1;
+       q_sw_ctxt.wbi_intvl_en = 1;
+       q_sw_ctxt.fnc_id = txq->func_id;
+       q_sw_ctxt.qen = 1;
+       q_sw_ctxt.rngsz_idx = txq->ringszidx;
+       q_sw_ctxt.bypass = txq->en_bypass;
+       q_sw_ctxt.wbk_en = 1;
+       q_sw_ctxt.ring_bs_addr = (uint64_t)txq->tx_mz->iova;
+
+       if (txq->en_bypass &&
+               txq->bypass_desc_sz != 0)
+               q_sw_ctxt.desc_sz = bypass_desc_sz_idx;
+
+       /* Set SW Context */
+       err = hw_access->qdma_sw_ctx_conf(dev, 0,
+                       (qid + queue_base), &q_sw_ctxt,
+                       QDMA_HW_ACCESS_WRITE);
+       if (err < 0)
+               return qdma_dev->hw_access->qdma_get_error_code(err);
+
+       txq->q_pidx_info.pidx = 0;
+       hw_access->qdma_queue_pidx_update(dev, qdma_dev->is_vf,
+               qid, 0, &txq->q_pidx_info);
+
+       dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+       txq->status = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
 }
 
@@ -1185,8 +1413,32 @@ int qdma_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 
 int qdma_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 {
-       (void)dev;
-       (void)qid;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       uint32_t queue_base =  qdma_dev->queue_base;
+       struct qdma_tx_queue *txq;
+       int cnt = 0;
+       uint16_t count;
+
+       txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+
+       txq->status = RTE_ETH_QUEUE_STATE_STOPPED;
+       /* Wait for TXQ to send out all packets. */
+       while (txq->wb_status->cidx != txq->q_pidx_info.pidx) {
+               usleep(10);
+               if (cnt++ > 10000)
+                       break;
+       }
+
+       qdma_inv_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+       /* Relinquish pending mbufs */
+       for (count = 0; count < txq->nb_tx_desc - 1; count++) {
+               rte_pktmbuf_free(txq->sw_ring[count]);
+               txq->sw_ring[count] = NULL;
+       }
+       qdma_reset_tx_queue(txq);
+
+       dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
 
        return 0;
 }
-- 
2.36.1
