Add the ethdev ops for NFDK firmware and implement the Tx queue setup
and release functions for it.

Signed-off-by: Jin Liu <jin....@corigine.com>
Signed-off-by: Diana Wang <na.w...@corigine.com>
Signed-off-by: Peng Zhang <peng.zh...@corigine.com>
Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Signed-off-by: Niklas Söderlund <niklas.soderl...@corigine.com>
---
 drivers/net/nfp/nfp_ethdev.c    |  32 ++++++-
 drivers/net/nfp/nfp_ethdev_vf.c |  40 +++++++--
 drivers/net/nfp/nfp_rxtx.c      | 153 ++++++++++++++++++++++++++++++++
 drivers/net/nfp/nfp_rxtx.h      |   7 ++
 4 files changed, 226 insertions(+), 6 deletions(-)
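
Note (below the '---' so it is not applied as part of the commit): a minimal,
hypothetical usage sketch. Once a port running NFDK firmware has
nfp_net_nfdk_eth_dev_ops mounted, the generic ethdev call below is dispatched
to the new nfp_net_nfdk_tx_queue_setup(). The helper name, port id and
descriptor count are placeholders for illustration only.

#include <rte_ethdev.h>

/*
 * Illustration only, not part of this patch. nb_desc is a placeholder and
 * must satisfy the NFDK ring constraints checked by the new setup function.
 */
static int
setup_one_nfdk_txq(uint16_t port_id)
{
	uint16_t nb_desc = 1024;

	/* NULL txconf selects the PMD defaults (e.g. tx_free_thresh) */
	return rte_eth_tx_queue_setup(port_id, 0 /* queue_idx */, nb_desc,
			rte_eth_dev_socket_id(port_id), NULL);
}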

diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index c09a035323..2bf7a565bc 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -358,11 +358,41 @@ static const struct eth_dev_ops nfp_net_nfd3_eth_dev_ops = {
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
 };
 
+static const struct eth_dev_ops nfp_net_nfdk_eth_dev_ops = {
+       .dev_configure          = nfp_net_configure,
+       .dev_start              = nfp_net_start,
+       .dev_stop               = nfp_net_nfd3_stop,
+       .dev_set_link_up        = nfp_net_set_link_up,
+       .dev_set_link_down      = nfp_net_set_link_down,
+       .dev_close              = nfp_net_nfd3_close,
+       .promiscuous_enable     = nfp_net_promisc_enable,
+       .promiscuous_disable    = nfp_net_promisc_disable,
+       .link_update            = nfp_net_link_update,
+       .stats_get              = nfp_net_stats_get,
+       .stats_reset            = nfp_net_stats_reset,
+       .dev_infos_get          = nfp_net_infos_get,
+       .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
+       .mtu_set                = nfp_net_dev_mtu_set,
+       .mac_addr_set           = nfp_net_set_mac_addr,
+       .vlan_offload_set       = nfp_net_vlan_offload_set,
+       .reta_update            = nfp_net_reta_update,
+       .reta_query             = nfp_net_reta_query,
+       .rss_hash_update        = nfp_net_rss_hash_update,
+       .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
+       .rx_queue_setup         = nfp_net_rx_queue_setup,
+       .rx_queue_release       = nfp_net_rx_queue_release,
+       .tx_queue_setup         = nfp_net_nfdk_tx_queue_setup,
+       .tx_queue_release       = nfp_net_nfdk_tx_queue_release,
+       .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
+       .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
+};
+
 static inline int
 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
 {
        switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
        case NFP_NET_CFG_VERSION_DP_NFD3:
+               eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
                break;
        case NFP_NET_CFG_VERSION_DP_NFDK:
                if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
@@ -370,13 +400,13 @@ nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
                                NFD_CFG_MAJOR_VERSION_of(hw->ver));
                        return -EINVAL;
                }
+               eth_dev->dev_ops = &nfp_net_nfdk_eth_dev_ops;
                break;
        default:
                PMD_DRV_LOG(ERR, "The version of firmware is not correct.");
                return -EINVAL;
        }
 
-       eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
        eth_dev->rx_queue_count = nfp_net_rx_queue_count;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
        eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index e83c9dbcaf..ee9ff977cc 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -265,6 +265,35 @@ static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
 };
 
+static const struct eth_dev_ops nfp_netvf_nfdk_eth_dev_ops = {
+       .dev_configure          = nfp_net_configure,
+       .dev_start              = nfp_netvf_start,
+       .dev_stop               = nfp_netvf_nfd3_stop,
+       .dev_set_link_up        = nfp_netvf_set_link_up,
+       .dev_set_link_down      = nfp_netvf_set_link_down,
+       .dev_close              = nfp_netvf_nfd3_close,
+       .promiscuous_enable     = nfp_net_promisc_enable,
+       .promiscuous_disable    = nfp_net_promisc_disable,
+       .link_update            = nfp_net_link_update,
+       .stats_get              = nfp_net_stats_get,
+       .stats_reset            = nfp_net_stats_reset,
+       .dev_infos_get          = nfp_net_infos_get,
+       .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
+       .mtu_set                = nfp_net_dev_mtu_set,
+       .mac_addr_set           = nfp_net_set_mac_addr,
+       .vlan_offload_set       = nfp_net_vlan_offload_set,
+       .reta_update            = nfp_net_reta_update,
+       .reta_query             = nfp_net_reta_query,
+       .rss_hash_update        = nfp_net_rss_hash_update,
+       .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
+       .rx_queue_setup         = nfp_net_rx_queue_setup,
+       .rx_queue_release       = nfp_net_rx_queue_release,
+       .tx_queue_setup         = nfp_net_nfdk_tx_queue_setup,
+       .tx_queue_release       = nfp_net_nfdk_tx_queue_release,
+       .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
+       .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
+};
+
 static int
 nfp_netvf_init(struct rte_eth_dev *eth_dev)
 {
@@ -291,11 +320,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
-       eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
-       eth_dev->rx_queue_count = nfp_net_rx_queue_count;
-       eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
-       eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
-
        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                PMD_DRV_LOG(ERR,
@@ -309,6 +333,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
        switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
        case NFP_NET_CFG_VERSION_DP_NFD3:
+               eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
                break;
        case NFP_NET_CFG_VERSION_DP_NFDK:
                if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
@@ -316,12 +341,17 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
                                NFD_CFG_MAJOR_VERSION_of(hw->ver));
                        return -EINVAL;
                }
+               eth_dev->dev_ops = &nfp_netvf_nfdk_eth_dev_ops;
                break;
        default:
                PMD_DRV_LOG(ERR, "The version of firmware is not correct.");
                return -EINVAL;
        }
 
+       eth_dev->rx_queue_count = nfp_net_rx_queue_count;
+       eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
+       eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
+
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 2c9875e829..bf5817db4b 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -987,3 +987,156 @@ nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
 
        return i;
 }
+
+static void
+nfp_net_nfdk_tx_queue_release_mbufs(struct nfp_net_txq *txq)
+{
+       uint32_t i;
+
+       if (txq->ktxbufs == NULL)
+               return;
+
+       for (i = 0; i < txq->tx_count; i++) {
+               if (txq->ktxbufs[i].mbuf) {
+                       rte_pktmbuf_free_seg(txq->ktxbufs[i].mbuf);
+                       txq->ktxbufs[i].mbuf = NULL;
+               }
+       }
+}
+
+void
+nfp_net_nfdk_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+       struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
+
+       if (txq) {
+               nfp_net_nfdk_tx_queue_release_mbufs(txq);
+               rte_free(txq->ktxbufs);
+               rte_free(txq);
+       }
+}
+
+void
+nfp_net_nfdk_reset_tx_queue(struct nfp_net_txq *txq)
+{
+       nfp_net_nfdk_tx_queue_release_mbufs(txq);
+       txq->wr_p = 0;
+       txq->rd_p = 0;
+}
+
+int
+nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
+               uint16_t queue_idx,
+               uint16_t nb_desc,
+               unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf)
+{
+       const struct rte_memzone *tz;
+       struct nfp_net_txq *txq;
+       uint16_t tx_free_thresh;
+       struct nfp_net_hw *hw;
+       uint32_t tx_desc_sz;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Validating number of descriptors */
+       tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfdk_tx_desc);
+       if (((NFDK_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC) != 0 ||
+           ((NFDK_TX_DESC_PER_SIMPLE_PKT * nb_desc) % NFDK_TX_DESC_BLOCK_CNT) != 0 ||
+             nb_desc > NFP_NET_MAX_TX_DESC || nb_desc < NFP_NET_MIN_TX_DESC) {
+               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+               return -EINVAL;
+       }
+
+       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+                               tx_conf->tx_free_thresh :
+                               DEFAULT_TX_FREE_THRESH);
+
+       if (tx_free_thresh > (nb_desc)) {
+               PMD_DRV_LOG(ERR,
+                       "tx_free_thresh must be less than the number of TX "
+                       "descriptors. (tx_free_thresh=%u port=%d "
+                       "queue=%d)", (unsigned int)tx_free_thresh,
+                       dev->data->port_id, (int)queue_idx);
+               return -(EINVAL);
+       }
+
+       /*
+        * Free memory prior to re-allocation if needed. This is the case after
+        * calling nfp_net_stop
+        */
+       if (dev->data->tx_queues[queue_idx]) {
+               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+                               queue_idx);
+               nfp_net_nfdk_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocating tx queue data structure */
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+                       RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               return -ENOMEM;
+       }
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                               sizeof(struct nfp_net_nfdk_tx_desc) *
+                               NFDK_TX_DESC_PER_SIMPLE_PKT *
+                               NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+                               socket_id);
+       if (tz == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               nfp_net_nfdk_tx_queue_release(dev, queue_idx);
+               return -ENOMEM;
+       }
+
+       txq->tx_count = nb_desc * NFDK_TX_DESC_PER_SIMPLE_PKT;
+       txq->tx_free_thresh = tx_free_thresh;
+       txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
+       txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
+       txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
+
+       /* queue mapping based on firmware configuration */
+       txq->qidx = queue_idx;
+       txq->tx_qcidx = queue_idx * hw->stride_tx;
+       txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
+
+       txq->port_id = dev->data->port_id;
+
+       /* Saving physical and virtual addresses for the TX ring */
+       txq->dma = (uint64_t)tz->iova;
+       txq->ktxds = (struct nfp_net_nfdk_tx_desc *)tz->addr;
+
+       /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+       txq->ktxbufs = rte_zmalloc_socket("txq->ktxbufs",
+                               sizeof(*txq->ktxbufs) * txq->tx_count,
+                               RTE_CACHE_LINE_SIZE, socket_id);
+
+       if (txq->ktxbufs == NULL) {
+               nfp_net_nfdk_tx_queue_release(dev, queue_idx);
+               return -ENOMEM;
+       }
+       PMD_TX_LOG(DEBUG, "ktxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+               txq->ktxbufs, txq->ktxds, (unsigned long)txq->dma);
+
+       nfp_net_nfdk_reset_tx_queue(txq);
+
+       dev->data->tx_queues[queue_idx] = txq;
+       txq->hw = hw;
+       /*
+        * Telling the HW about the physical address of the TX ring and number
+        * of descriptors in log2 format
+        */
+       nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
+
+       return 0;
+}
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index 81e2f7560b..cce3e0c0e5 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -352,6 +352,13 @@ int nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  const struct rte_eth_txconf *tx_conf);
 uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
+int nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
+               uint16_t queue_idx,
+               uint16_t nb_desc,
+               unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf);
+void nfp_net_nfdk_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void nfp_net_nfdk_reset_tx_queue(struct nfp_net_txq *txq);
 
 #endif /* _NFP_RXTX_H_ */
 /*
-- 
2.27.0
