Add routines for the below ethdev ops:
 1. dev_configure
 2. dev_reset
 3. dev_close

Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
---
 drivers/net/qdma/qdma.h        |  11 +-
 drivers/net/qdma/qdma_devops.c | 292 ++++++++++++++++++++++++++++++++-
 drivers/net/qdma/qdma_devops.h |  42 +++++
 drivers/net/qdma/qdma_ethdev.c |   4 +-
 4 files changed, 339 insertions(+), 10 deletions(-)

diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index f4155380f9..7314af71d7 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -28,9 +28,16 @@
 
 #define DEFAULT_QUEUE_BASE     (0)
 
+#define QDMA_MAX_BURST_SIZE (128)
+#define QDMA_MIN_RXBUFF_SIZE   (256)
+
 #define DEFAULT_TIMER_CNT_TRIG_MODE_TIMER      (5)
 #define DEFAULT_TIMER_CNT_TRIG_MODE_COUNT_TIMER        (30)
 
+#define WB_TIMEOUT             (100000)
+#define RESET_TIMEOUT          (60000)
+#define SHUTDOWN_TIMEOUT       (60000)
+
 /* Completion Context config */
 #define CMPT_DEFAULT_COLOR_BIT           (1)
 #define CMPT_CNTXT_DESC_SIZE_8B          (0)
@@ -121,8 +128,6 @@ struct qdma_rx_queue {
        uint32_t                queue_id; /* RX queue index. */
        uint64_t                mbuf_initializer; /* value to init mbufs */
 
-       struct qdma_pkt_stats   stats;
-
        uint16_t                port_id; /* Device port identifier. */
        uint8_t                 status:1;
        uint8_t                 err:1;
@@ -167,6 +172,7 @@ struct qdma_tx_queue {
        uint8_t                         tx_deferred_start:1;
        uint8_t                         en_bypass:1;
        uint8_t                         status:1;
+       enum rte_pmd_qdma_bypass_desc_len               bypass_desc_sz:7;
        uint16_t                        port_id; /* Device port identifier. */
        uint8_t                         func_id; /* RX queue index. */
        int8_t                          ringszidx;
@@ -238,6 +244,7 @@ struct qdma_pci_dev {
        struct queue_info *q_info;
        uint8_t init_q_range;
 
+       void    **cmpt_queues;
        /* Pointer to QDMA access layer function pointers */
        struct qdma_hw_access *hw_access;
 
diff --git a/drivers/net/qdma/qdma_devops.c b/drivers/net/qdma/qdma_devops.c
index cf3ef6de34..2dd76e82c3 100644
--- a/drivers/net/qdma/qdma_devops.c
+++ b/drivers/net/qdma/qdma_devops.c
@@ -26,6 +26,25 @@
 #include "qdma_platform.h"
 #include "qdma_devops.h"
 
+static int qdma_pf_fmap_prog(struct rte_eth_dev *dev)
+{
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_fmap_cfg fmap_cfg;
+       int ret = 0;
+
+       memset(&fmap_cfg, 0, sizeof(struct qdma_fmap_cfg));
+
+       /* FMAP configuration */
+       fmap_cfg.qbase = qdma_dev->queue_base;
+       fmap_cfg.qmax = qdma_dev->qsets_en;
+       ret = qdma_dev->hw_access->qdma_fmap_conf(dev,
+                       qdma_dev->func_id, &fmap_cfg, QDMA_HW_ACCESS_WRITE);
+       if (ret < 0)
+               return qdma_dev->hw_access->qdma_get_error_code(ret);
+
+       return ret;
+}
+
 /**
  * DPDK callback to configure a RX queue.
  *
@@ -159,7 +178,7 @@ int qdma_dev_infos_get(struct rte_eth_dev *dev,
        dev_info->max_rx_queues = qdma_dev->dev_cap.num_qs;
        dev_info->max_tx_queues = qdma_dev->dev_cap.num_qs;
 
-       dev_info->min_rx_bufsize = 256;
+       dev_info->min_rx_bufsize = QDMA_MIN_RXBUFF_SIZE;
        dev_info->max_rx_pktlen = DMA_BRAM_SIZE;
        dev_info->max_mac_addrs = 1;
 
@@ -192,7 +211,110 @@ int qdma_dev_stop(struct rte_eth_dev *dev)
  */
 int qdma_dev_close(struct rte_eth_dev *dev)
 {
-       (void)dev;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct qdma_tx_queue *txq;
+       struct qdma_rx_queue *rxq;
+       struct qdma_cmpt_queue *cmptq;
+       uint32_t qid;
+       struct qdma_fmap_cfg fmap_cfg;
+       int ret = 0;
+
+       PMD_DRV_LOG(INFO, "PF-%d(DEVFN) DEV Close\n", qdma_dev->func_id);
+
+       if (dev->data->dev_started)
+               qdma_dev_stop(dev);
+
+       memset(&fmap_cfg, 0, sizeof(struct qdma_fmap_cfg));
+       qdma_dev->hw_access->qdma_fmap_conf(dev,
+               qdma_dev->func_id, &fmap_cfg, QDMA_HW_ACCESS_CLEAR);
+
+       /* iterate over rx queues */
+       for (qid = 0; qid < dev->data->nb_rx_queues; ++qid) {
+               rxq = dev->data->rx_queues[qid];
+               if (rxq) {
+                       PMD_DRV_LOG(INFO, "Remove C2H queue: %d", qid);
+
+                       if (rxq->sw_ring)
+                               rte_free(rxq->sw_ring);
+                       if (rxq->st_mode) { /* if ST-mode */
+                               if (rxq->rx_cmpt_mz)
+                                       rte_memzone_free(rxq->rx_cmpt_mz);
+                       }
+
+                       qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+                                       qdma_dev->func_id,
+                                       QDMA_DEV_Q_TYPE_C2H);
+
+                       if (rxq->st_mode)
+                               qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+                                       qdma_dev->func_id,
+                                       QDMA_DEV_Q_TYPE_CMPT);
+
+                       if (rxq->rx_mz)
+                               rte_memzone_free(rxq->rx_mz);
+                       rte_free(rxq);
+                       PMD_DRV_LOG(INFO, "C2H queue %d removed", qid);
+               }
+       }
+
+       /* iterate over tx queues */
+       for (qid = 0; qid < dev->data->nb_tx_queues; ++qid) {
+               txq = dev->data->tx_queues[qid];
+               if (txq) {
+                       PMD_DRV_LOG(INFO, "Remove H2C queue: %d", qid);
+
+                       if (txq->sw_ring)
+                               rte_free(txq->sw_ring);
+                       if (txq->tx_mz)
+                               rte_memzone_free(txq->tx_mz);
+                       rte_free(txq);
+                       PMD_DRV_LOG(INFO, "H2C queue %d removed", qid);
+
+                       qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+                                       qdma_dev->func_id,
+                                       QDMA_DEV_Q_TYPE_H2C);
+               }
+       }
+       if (qdma_dev->dev_cap.mm_cmpt_en) {
+               /* iterate over cmpt queues */
+               for (qid = 0; qid < qdma_dev->qsets_en; ++qid) {
+                       cmptq = qdma_dev->cmpt_queues[qid];
+                       if (cmptq != NULL) {
+                               PMD_DRV_LOG(INFO, "PF-%d(DEVFN) Remove CMPT queue: %d",
+                                               qdma_dev->func_id, qid);
+                               if (cmptq->cmpt_mz)
+                                       rte_memzone_free(cmptq->cmpt_mz);
+                               rte_free(cmptq);
+                               PMD_DRV_LOG(INFO, "PF-%d(DEVFN) CMPT queue %d removed",
+                                               qdma_dev->func_id, qid);
+                               qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+                                       qdma_dev->func_id,
+                                       QDMA_DEV_Q_TYPE_CMPT);
+                       }
+               }
+
+               if (qdma_dev->cmpt_queues != NULL) {
+                       rte_free(qdma_dev->cmpt_queues);
+                       qdma_dev->cmpt_queues = NULL;
+               }
+       }
+       qdma_dev->qsets_en = 0;
+       ret = qdma_dev_update(qdma_dev->dma_device_index, qdma_dev->func_id,
+                       qdma_dev->qsets_en, (int *)&qdma_dev->queue_base);
+       if (ret != QDMA_SUCCESS) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) qmax update failed: %d\n",
+                       qdma_dev->func_id, ret);
+               return 0;
+       }
+
+       qdma_dev->init_q_range = 0;
+       rte_free(qdma_dev->q_info);
+       qdma_dev->q_info = NULL;
+       qdma_dev->dev_configured = 0;
+
+       /* cancel pending polls */
+       if (qdma_dev->is_master)
+               rte_eal_alarm_cancel(qdma_check_errors, (void *)dev);
 
        return 0;
 }
@@ -212,9 +334,61 @@ int qdma_dev_close(struct rte_eth_dev *dev)
  */
 int qdma_dev_reset(struct rte_eth_dev *dev)
 {
-       (void)dev;
-
-       return 0;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       uint32_t vf_device_count = 0;
+       uint32_t i = 0;
+       int ret = 0;
+
+       /* Get the number of active VFs for this PF device */
+       vf_device_count = qdma_dev->vf_online_count;
+       qdma_dev->reset_in_progress = 1;
+
+       /* Uninitialize PCI device */
+       ret = qdma_eth_dev_uninit(dev);
+       if (ret != QDMA_SUCCESS) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) uninitialization failed: %d\n",
+                       qdma_dev->func_id, ret);
+               return -1;
+       }
+
+       /* Initialize PCI device */
+       ret = qdma_eth_dev_init(dev);
+       if (ret != QDMA_SUCCESS) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) initialization failed: %d\n",
+                       qdma_dev->func_id, ret);
+               return -1;
+       }
+
+       /* Send "PF_RESET_DONE" mailbox message from PF to all its VFs,
+        * so that VFs can come online again
+        */
+       for (i = 0; i < pci_dev->max_vfs; i++) {
+               if (qdma_dev->vfinfo[i].func_id == QDMA_FUNC_ID_INVALID)
+                       continue;
+       }
+
+       /* Start waiting for a maximum of 60 secs to get all its VFs
+        * to come online that were active before PF reset
+        */
+       i = 0;
+       while (i < RESET_TIMEOUT) {
+               if (qdma_dev->vf_online_count == vf_device_count) {
+                       PMD_DRV_LOG(INFO,
+                               "%s: Reset completed for PF-%d(DEVFN)\n",
+                               __func__, qdma_dev->func_id);
+                       break;
+               }
+               rte_delay_ms(1);
+               i++;
+       }
+
+       if (i >= RESET_TIMEOUT) {
+               PMD_DRV_LOG(ERR, "%s: Failed reset for PF-%d(DEVFN)\n",
+                       __func__, qdma_dev->func_id);
+       }
+
+       return ret;
 }
 
 /**
@@ -228,7 +402,113 @@ int qdma_dev_reset(struct rte_eth_dev *dev)
  */
 int qdma_dev_configure(struct rte_eth_dev *dev)
 {
-       (void)dev;
+       struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+       uint16_t qid = 0;
+       int ret = 0, queue_base = -1;
+       uint8_t stat_id;
+
+       PMD_DRV_LOG(INFO, "Configure the qdma engines\n");
+
+       qdma_dev->qsets_en = RTE_MAX(dev->data->nb_rx_queues,
+                                       dev->data->nb_tx_queues);
+       if (qdma_dev->qsets_en > qdma_dev->dev_cap.num_qs) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) Error: Number of Queues to be"
+                               " configured are greater than the queues"
+                               " supported by the hardware\n",
+                               qdma_dev->func_id);
+               qdma_dev->qsets_en = 0;
+               return -1;
+       }
+
+       /* Request queue base from the resource manager */
+       ret = qdma_dev_update(qdma_dev->dma_device_index, qdma_dev->func_id,
+                       qdma_dev->qsets_en, &queue_base);
+       if (ret != QDMA_SUCCESS) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) queue allocation failed: %d\n",
+                       qdma_dev->func_id, ret);
+               return -1;
+       }
+
+       ret = qdma_dev_qinfo_get(qdma_dev->dma_device_index, qdma_dev->func_id,
+                               (int *)&qdma_dev->queue_base,
+                               &qdma_dev->qsets_en);
+       if (ret != QDMA_SUCCESS) {
+               PMD_DRV_LOG(ERR, "%s: Error %d querying qbase\n",
+                               __func__, ret);
+               return -1;
+       }
+       PMD_DRV_LOG(INFO, "Bus: 0x%x, PF-%d(DEVFN) queue_base: %d\n",
+               qdma_dev->dma_device_index,
+               qdma_dev->func_id,
+               qdma_dev->queue_base);
+
+       qdma_dev->q_info = rte_zmalloc("qinfo", sizeof(struct queue_info) *
+                                       (qdma_dev->qsets_en), 0);
+       if (!qdma_dev->q_info) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) Cannot allocate "
+                               "memory for queue info\n", qdma_dev->func_id);
+               return (-ENOMEM);
+       }
+
+       /* Reserve memory for cmptq ring pointers
+        * Max completion queues can be maximum of rx and tx queues.
+        */
+       qdma_dev->cmpt_queues = rte_zmalloc("cmpt_queues",
+                                           sizeof(qdma_dev->cmpt_queues[0]) *
+                                               qdma_dev->qsets_en,
+                                               RTE_CACHE_LINE_SIZE);
+       if (qdma_dev->cmpt_queues == NULL) {
+               PMD_DRV_LOG(ERR, "PF-%d(DEVFN) cmpt ring pointers memory "
+                               "allocation failed:\n", qdma_dev->func_id);
+               rte_free(qdma_dev->q_info);
+               qdma_dev->q_info = NULL;
+               return -(ENOMEM);
+       }
+
+       for (qid = 0 ; qid < qdma_dev->qsets_en; qid++) {
+               /* Initialize queue_modes to all 1's ( i.e. Streaming) */
+               qdma_dev->q_info[qid].queue_mode = RTE_PMD_QDMA_STREAMING_MODE;
+
+               /* Disable the cmpt over flow check by default */
+               qdma_dev->q_info[qid].dis_cmpt_ovf_chk = 0;
+
+               qdma_dev->q_info[qid].trigger_mode = qdma_dev->trigger_mode;
+               qdma_dev->q_info[qid].timer_count =
+                                       qdma_dev->timer_count;
+       }
+
+       for (qid = 0 ; qid < dev->data->nb_rx_queues; qid++) {
+               qdma_dev->q_info[qid].cmpt_desc_sz = qdma_dev->cmpt_desc_len;
+               qdma_dev->q_info[qid].rx_bypass_mode =
+                                               qdma_dev->c2h_bypass_mode;
+               qdma_dev->q_info[qid].en_prefetch = qdma_dev->en_desc_prefetch;
+               qdma_dev->q_info[qid].immediate_data_state = 0;
+       }
+
+       for (qid = 0 ; qid < dev->data->nb_tx_queues; qid++)
+               qdma_dev->q_info[qid].tx_bypass_mode =
+                                               qdma_dev->h2c_bypass_mode;
+       for (stat_id = 0, qid = 0;
+               stat_id < RTE_ETHDEV_QUEUE_STAT_CNTRS;
+               stat_id++, qid++) {
+               /* Initialize map with qid same as stat_id */
+               qdma_dev->tx_qid_statid_map[stat_id] =
+                       (qid < dev->data->nb_tx_queues) ? qid : -1;
+               qdma_dev->rx_qid_statid_map[stat_id] =
+                       (qid < dev->data->nb_rx_queues) ? qid : -1;
+       }
+
+       ret = qdma_pf_fmap_prog(dev);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "FMAP programming failed\n");
+               rte_free(qdma_dev->q_info);
+               qdma_dev->q_info = NULL;
+               rte_free(qdma_dev->cmpt_queues);
+               qdma_dev->cmpt_queues = NULL;
+               return ret;
+       }
+
+       qdma_dev->dev_configured = 1;
 
        return 0;
 }
diff --git a/drivers/net/qdma/qdma_devops.h b/drivers/net/qdma/qdma_devops.h
index 240fa6b60c..c0f903f1cf 100644
--- a/drivers/net/qdma/qdma_devops.h
+++ b/drivers/net/qdma/qdma_devops.h
@@ -13,6 +13,29 @@ extern "C" {
 /** @defgroup dpdk_devops_func Functions
  */
 
+/**
+ * DPDK callback to register an Ethernet PCIe device.
+ *
+ * The Following actions are performed by this function:
+ *  - Parse and validate device arguments
+ *  - Identify PCIe BARs present in the device
+ *  - Register device operations
+ *  - Enable MM C2H and H2C channels
+ *  - Register PCIe device with Queue Resource Manager
+ *  - Program the QDMA IP global registers (by 1st PF that was probed)
+ *  - Enable HW errors and launch QDMA HW error monitoring thread
+ *    (by 1st PF that was probed)
+ *  - If VF is enabled, then enable Mailbox interrupt and register
+ *    Rx message handling function as interrupt handler
+ *
+ * @param dev Pointer to Ethernet device structure
+ *
+ * @return 0 on success, < 0 on failure
+ * @ingroup dpdk_devops_func
+ *
+ */
+int qdma_eth_dev_init(struct rte_eth_dev *dev);
+
 /**
  * DPDK callback for Ethernet device configuration.
  *
@@ -480,6 +503,25 @@ int qdma_vf_dev_close(struct rte_eth_dev *dev);
  */
 int qdma_dev_reset(struct rte_eth_dev *dev);
 
+/**
+ * DPDK callback to deregister a PCI device.
+ *
+ * The Following actions are performed by this function:
+ *  - Flushes out pending actions from the Tx Mailbox List
+ *  - Terminate Tx Mailbox thread
+ *  - Disable Mailbox interrupt and unregister interrupt handler
+ *  - Unregister PCIe device from Queue Resource Manager
+ *  - Cancel QDMA HW error monitoring thread if created by this device
+ *  - Disable MM C2H and H2C channels
+ *
+ * @param dev Pointer to Ethernet device structure
+ *
+ * @return 0 on success, < 0 on failure
+ * @ingroup dpdk_devops_func
+ *
+ */
+int qdma_eth_dev_uninit(struct rte_eth_dev *dev);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/qdma/qdma_ethdev.c b/drivers/net/qdma/qdma_ethdev.c
index 79aac4aa60..cc1e8eee71 100644
--- a/drivers/net/qdma/qdma_ethdev.c
+++ b/drivers/net/qdma/qdma_ethdev.c
@@ -333,7 +333,7 @@ static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
  * @return
  *   0 on success, negative errno value on failure.
  */
-static int qdma_eth_dev_init(struct rte_eth_dev *dev)
+int qdma_eth_dev_init(struct rte_eth_dev *dev)
 {
        struct qdma_pci_dev *dma_priv;
        uint8_t *baseaddr;
@@ -578,7 +578,7 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
  * @return
  *   0 on success, negative errno value on failure.
  */
-static int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
+int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
 {
        struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
 
-- 
2.36.1

Reply via email to