Implement routines for the dev_start and dev_stop PMD ops and
add support for the Rx/Tx datapath burst APIs for the VF.
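
For reference, a minimal sketch of how an application exercises these
new ops through the generic ethdev API. The run_port() helper, port_id 0
and BURST_SIZE are illustrative only; the port is assumed to be already
configured and its queues set up beforehand:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static int run_port(uint16_t port_id)
    {
            struct rte_mbuf *pkts[BURST_SIZE];
            uint16_t nb_rx, nb_tx, i;
            int ret;

            /* invokes the PMD's dev_start op (qdma_vf_dev_start here) */
            ret = rte_eth_dev_start(port_id);
            if (ret != 0)
                    return ret;

            /* poll queue 0; rx_pkt_burst/tx_pkt_burst map to
             * qdma_recv_pkts/qdma_xmit_pkts for this VF PMD
             */
            nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
            nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

            /* free any packets the Tx burst did not consume */
            for (i = nb_tx; i < nb_rx; i++)
                    rte_pktmbuf_free(pkts[i]);

            /* invokes the PMD's dev_stop op (qdma_vf_dev_stop here) */
            return rte_eth_dev_stop(port_id);
    }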

Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
---
 drivers/net/qdma/qdma_vf_ethdev.c | 61 +++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/drivers/net/qdma/qdma_vf_ethdev.c b/drivers/net/qdma/qdma_vf_ethdev.c
index 5a54c00893..cbae4c9716 100644
--- a/drivers/net/qdma/qdma_vf_ethdev.c
+++ b/drivers/net/qdma/qdma_vf_ethdev.c
@@ -334,6 +334,39 @@ static int qdma_queue_context_invalidate(struct rte_eth_dev *dev, uint32_t qid,
        return rv;
 }
 
+static int qdma_vf_dev_start(struct rte_eth_dev *dev)
+{
+       struct qdma_tx_queue *txq;
+       struct qdma_rx_queue *rxq;
+       uint32_t qid;
+       int err;
+
+       PMD_DRV_LOG(INFO, "qdma_dev_start: Starting\n");
+       /* prepare descriptor rings for operation */
+       for (qid = 0; qid < dev->data->nb_tx_queues; qid++) {
+               txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+
+               /* Deferred queues should not be started by dev_start */
+               if (!txq->tx_deferred_start) {
+                       err = qdma_vf_dev_tx_queue_start(dev, qid);
+                       if (err != 0)
+                               return err;
+               }
+       }
+
+       for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+               rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid];
+
+               /* Deferred queues should not be started by dev_start */
+               if (!rxq->rx_deferred_start) {
+                       err = qdma_vf_dev_rx_queue_start(dev, qid);
+                       if (err != 0)
+                               return err;
+               }
+       }
+       return 0;
+}
+
 static int qdma_vf_dev_link_update(struct rte_eth_dev *dev,
                                        __rte_unused int wait_to_complete)
 {
@@ -361,6 +394,24 @@ static int qdma_vf_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
        return 0;
 }
 
+static int qdma_vf_dev_stop(struct rte_eth_dev *dev)
+{
+       uint32_t qid;
+#ifdef RTE_LIBRTE_QDMA_DEBUG_DRIVER
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
+       PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Stop H2C & C2H queues",
+                       qdma_dev->func_id);
+#endif
+       /* reset driver's internal queue structures to default values */
+       for (qid = 0; qid < dev->data->nb_tx_queues; qid++)
+               qdma_vf_dev_tx_queue_stop(dev, qid);
+       for (qid = 0; qid < dev->data->nb_rx_queues; qid++)
+               qdma_vf_dev_rx_queue_stop(dev, qid);
+
+       return 0;
+}
+
 int qdma_vf_dev_close(struct rte_eth_dev *dev)
 {
        struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
@@ -371,6 +422,9 @@ int qdma_vf_dev_close(struct rte_eth_dev *dev)
 
        PMD_DRV_LOG(INFO, "Closing all queues\n");
 
+       if (dev->data->dev_started)
+               qdma_vf_dev_stop(dev);
+
        /* iterate over rx queues */
        for (qid = 0; qid < dev->data->nb_rx_queues; ++qid) {
                rxq = dev->data->rx_queues[qid];
@@ -729,6 +783,8 @@ int qdma_vf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 static struct eth_dev_ops qdma_vf_eth_dev_ops = {
        .dev_configure        = qdma_vf_dev_configure,
        .dev_infos_get        = qdma_vf_dev_infos_get,
+       .dev_start            = qdma_vf_dev_start,
+       .dev_stop             = qdma_vf_dev_stop,
        .dev_close            = qdma_vf_dev_close,
        .dev_reset            = qdma_vf_dev_reset,
        .link_update          = qdma_vf_dev_link_update,
@@ -811,6 +867,8 @@ static int eth_qdma_vf_dev_init(struct rte_eth_dev *dev)
        dma_priv->h2c_bypass_mode = 0;
 
        dev->dev_ops = &qdma_vf_eth_dev_ops;
+       dev->rx_pkt_burst = &qdma_recv_pkts;
+       dev->tx_pkt_burst = &qdma_xmit_pkts;
 
        dma_priv->config_bar_idx = DEFAULT_VF_CONFIG_BAR;
        dma_priv->bypass_bar_idx = BAR_ID_INVALID;
@@ -913,6 +971,9 @@ static int eth_qdma_vf_dev_uninit(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;
 
+       if (qdma_dev->dev_configured)
+               qdma_vf_dev_close(dev);
+
        qdma_ethdev_offline(dev);
 
        if (qdma_dev->reset_state != RESET_STATE_RECV_PF_RESET_REQ)
-- 
2.36.1
