Adds the setup/start logic for the ctrl vNIC. This vNIC is used by
the PMD and flower firmware application as a communication channel
between driver and firmware. In the case of OVS it is also used to
communicate flow statistics from hardware to the driver.

A rte_eth device is not exposed to DPDK for this vNIC as it is strictly
used internally by flower logic.

Because of the addition of the ctrl vNIC, an extra PCItoCPPBar slot is
needed. Modify the related logic accordingly.
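
In outline, probe now runs the following sequence (a sketch using the
names from the diff below; error handling omitted):

    /* Map the ctrl vNIC config BAR exposed by the firmware */
    ctrl_hw->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl,
                    "_pf0_net_ctrl_bar", 32768, &ctrl_hw->ctrl_area);

    /* Allocate the internal eth_dev, mbuf pool and Rx/Tx rings */
    nfp_flower_init_ctrl_vnic(ctrl_hw);

    /* Enable the queues, reconfigure the device, fill the Rx freelist */
    nfp_flower_start_ctrl_vnic(ctrl_hw);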

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderl...@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c        | 220 +++++++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_flower.h        |   6 +
 drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c |  31 ++--
 3 files changed, 245 insertions(+), 12 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 24aa288..18ffa5c 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -27,6 +27,7 @@
 #define DEFAULT_FLBUF_SIZE 9216
 
 #define PF_VNIC_NB_DESC 1024
+#define CTRL_VNIC_NB_DESC 512
 
 static const struct rte_eth_rxconf rx_conf = {
        .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
@@ -206,6 +207,11 @@
        .dev_close              = nfp_flower_pf_close,
 };
 
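+/*
+ * Minimal dev_ops for the ctrl vNIC: it is configured from within the PMD
+ * and never exposed as a DPDK ethdev port.
+ */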
+static const struct eth_dev_ops nfp_flower_ctrl_vnic_ops = {
+       .dev_infos_get          = nfp_net_infos_get,
+       .dev_configure          = nfp_net_configure,
+};
+
 struct dp_packet {
        struct rte_mbuf mbuf;
        uint32_t source;
@@ -496,12 +502,192 @@ struct dp_packet {
        return 0;
 }
 
+static int
+nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+{
+       uint32_t i;
+       int ret = 0;
+       uint16_t n_txq;
+       uint16_t n_rxq;
+       unsigned int numa_node;
+       struct rte_mempool *mp;
+       struct nfp_pf_dev *pf_dev;
+       struct rte_eth_dev *eth_dev;
+       struct nfp_app_fw_flower *app_fw_flower;
+
+       /* Set up some pointers here for ease of use */
+       pf_dev = hw->pf_dev;
+       app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
+
+       ret = nfp_flower_init_vnic_common(hw, "ctrl_vnic");
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Could not init ctrl vnic");
+               return -EINVAL;
+       }
+
+       /* Allocate memory for the eth_dev of the vNIC */
+       hw->eth_dev = rte_eth_dev_allocate("nfp_ctrl_vnic");
+       if (hw->eth_dev == NULL) {
+               PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic");
+               return -ENOMEM;
+       }
+
+       /* Grab the pointer to the newly created rte_eth_dev here */
+       eth_dev = hw->eth_dev;
+
+       numa_node = rte_socket_id();
+
+       /* Create a mbuf pool for the ctrl vNIC */
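+       /* 4x the ring depth, with 9216 byte data rooms to match DEFAULT_FLBUF_SIZE */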
+       app_fw_flower->ctrl_pktmbuf_pool = rte_pktmbuf_pool_create("ctrl_mbuf_pool",
+                       4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node);
+       if (app_fw_flower->ctrl_pktmbuf_pool == NULL) {
+               PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vNIC failed");
+               ret = -ENOMEM;
+               goto port_release;
+       }
+
+       mp = app_fw_flower->ctrl_pktmbuf_pool;
+
+       eth_dev->dev_ops = &nfp_flower_ctrl_vnic_ops;
+       rte_eth_dev_probing_finish(eth_dev);
+
+       /* Configure the ctrl vNIC device */
+       n_rxq = hw->max_rx_queues;
+       n_txq = hw->max_tx_queues;
+       eth_dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+               sizeof(eth_dev->data->rx_queues[0]) * n_rxq,
+               RTE_CACHE_LINE_SIZE);
+       if (eth_dev->data->rx_queues == NULL) {
+               PMD_INIT_LOG(ERR, "rte_zmalloc failed for ctrl vNIC rx queues");
+               ret = -ENOMEM;
+               goto mempool_cleanup;
+       }
+
+       eth_dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+               sizeof(eth_dev->data->tx_queues[0]) * n_txq,
+               RTE_CACHE_LINE_SIZE);
+       if (eth_dev->data->tx_queues == NULL) {
+               PMD_INIT_LOG(ERR, "rte_zmalloc failed for ctrl vNIC tx queues");
+               ret = -ENOMEM;
+               goto rx_queue_free;
+       }
+
+       /* Fill in some of the eth_dev fields */
+       eth_dev->device = &pf_dev->pci_dev->device;
+       eth_dev->data->nb_tx_queues = n_txq;
+       eth_dev->data->nb_rx_queues = n_rxq;
+       eth_dev->data->dev_private = hw;
+
+       /* Set up the Rx queues */
+       for (i = 0; i < n_rxq; i++) {
+               ret = nfp_net_rx_queue_setup(eth_dev, i, CTRL_VNIC_NB_DESC, numa_node,
+                               &rx_conf, mp);
+               if (ret != 0) {
+                       PMD_INIT_LOG(ERR, "Configure ctrl vNIC Rx queue %d failed", i);
+                       goto rx_queue_cleanup;
+               }
+       }
+
+       /* Set up the Tx queues */
+       for (i = 0; i < n_txq; i++) {
+               ret = nfp_net_nfd3_tx_queue_setup(eth_dev, i, CTRL_VNIC_NB_DESC, numa_node,
+                               &tx_conf);
+               if (ret != 0) {
+                       PMD_INIT_LOG(ERR, "Configure ctrl vNIC Tx queue %d failed", i);
+                       goto tx_queue_cleanup;
+               }
+       }
+
+       return 0;
+
+tx_queue_cleanup:
+       for (i = 0; i < n_txq; i++)
+               nfp_net_tx_queue_release(eth_dev, i);
+rx_queue_cleanup:
+       for (i = 0; i < n_rxq; i++)
+               nfp_net_rx_queue_release(eth_dev, i);
+       rte_free(eth_dev->data->tx_queues);
+rx_queue_free:
+       rte_free(eth_dev->data->rx_queues);
+mempool_cleanup:
+       rte_mempool_free(mp);
+port_release:
+       rte_eth_dev_release_port(hw->eth_dev);
+
+       return ret;
+}
+
+static void
+nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
+{
+       uint32_t i;
+       struct nfp_app_fw_flower *app_fw_flower;
+
+       app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv);
+
+       for (i = 0; i < hw->max_tx_queues; i++)
+               nfp_net_tx_queue_release(hw->eth_dev, i);
+
+       for (i = 0; i < hw->max_rx_queues; i++)
+               nfp_net_rx_queue_release(hw->eth_dev, i);
+
+       rte_free(hw->eth_dev->data->tx_queues);
+       rte_free(hw->eth_dev->data->rx_queues);
+       rte_mempool_free(app_fw_flower->ctrl_pktmbuf_pool);
+       rte_eth_dev_release_port(hw->eth_dev);
+}
+
+static int
+nfp_flower_start_ctrl_vnic(struct nfp_net_hw *hw)
+{
+       int ret;
+       uint32_t update;
+       uint32_t new_ctrl;
+       struct rte_eth_dev *dev;
+
+       dev = hw->eth_dev;
+
+       /* Disabling queues just in case... */
+       nfp_net_disable_queues(dev);
+
+       /* Enabling the required queues in the device */
+       nfp_net_enable_queues(dev);
+
+       /* Writing configuration parameters in the device */
+       nfp_net_params_setup(hw);
+
+       new_ctrl = NFP_NET_CFG_CTRL_ENABLE;
+       update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
+                       NFP_NET_CFG_UPDATE_MSIX;
+
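+       /* Make sure the ring and config writes above reach the device first */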
+       rte_wmb();
+
+       /* If the reconfig fails, avoid changing the hw state */
+       ret = nfp_net_reconfig(hw, new_ctrl, update);
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Failed to reconfig ctrl vnic");
+               return -EIO;
+       }
+
+       hw->ctrl = new_ctrl;
+
+       /* Setup the freelist ring */
+       ret = nfp_net_rx_freelist_setup(dev);
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Error with flower ctrl vNIC freelist setup");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 int
 nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
 {
        int ret;
        unsigned int numa_node;
        struct nfp_net_hw *pf_hw;
+       struct nfp_net_hw *ctrl_hw;
        struct nfp_app_fw_flower *app_fw_flower;
 
        numa_node = rte_socket_id();
@@ -561,8 +747,42 @@ struct dp_packet {
                goto pf_vnic_cleanup;
        }
 
+       /* The ctrl vNIC struct comes directly after the PF one */
+       app_fw_flower->ctrl_hw = pf_hw + 1;
+       ctrl_hw = app_fw_flower->ctrl_hw;
+
+       /* Map the ctrl vNIC ctrl bar */
+       ctrl_hw->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_ctrl_bar",
+               32768, &ctrl_hw->ctrl_area);
+       if (ctrl_hw->ctrl_bar == NULL) {
+               PMD_INIT_LOG(ERR, "Could not map the ctrl vNIC ctrl bar");
+               ret = -ENODEV;
+               goto pf_vnic_cleanup;
+       }
+
+       /* Now populate the ctrl vNIC */
+       ctrl_hw->pf_dev = pf_dev;
+       ctrl_hw->cpp = pf_dev->cpp;
+
+       ret = nfp_flower_init_ctrl_vnic(app_fw_flower->ctrl_hw);
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC");
+               goto ctrl_cpp_area_cleanup;
+       }
+
+       /* Start the ctrl vNIC */
+       ret = nfp_flower_start_ctrl_vnic(app_fw_flower->ctrl_hw);
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Could not start flower ctrl vNIC");
+               goto ctrl_vnic_cleanup;
+       }
+
        return 0;
 
+ctrl_vnic_cleanup:
+       nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
+ctrl_cpp_area_cleanup:
+       nfp_cpp_area_free(ctrl_hw->ctrl_area);
 pf_vnic_cleanup:
        nfp_flower_cleanup_pf_vnic(app_fw_flower->pf_hw);
 pf_cpp_area_cleanup:
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index 981d88d..e18703e 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -14,6 +14,12 @@ struct nfp_app_fw_flower {
        /* Pointer to the PF vNIC */
        struct nfp_net_hw *pf_hw;
 
+       /* Pointer to a mempool for the ctrl vNIC */
+       struct rte_mempool *ctrl_pktmbuf_pool;
+
+       /* Pointer to the ctrl vNIC */
+       struct nfp_net_hw *ctrl_hw;
+
        /* the eth table as reported by firmware */
        struct nfp_eth_table *nfp_eth_table;
 };
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
index 08bc4e8..22c8bc4 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -91,7 +91,10 @@
  * @refcnt:    number of current users
  * @iomem:     mapped IO memory
  */
+#define NFP_BAR_MIN 1
+#define NFP_BAR_MID 5
 #define NFP_BAR_MAX 7
+
 struct nfp_bar {
        struct nfp_pcie_user *nfp;
        uint32_t barcfg;
@@ -292,6 +295,7 @@ struct nfp_pcie_user {
  * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
  *
  *         Halving PCItoCPPBars for primary and secondary processes.
+ *         For CoreNIC firmware:
  *         NFP PMD just requires two fixed slots, one for configuration BAR,
  *         and another for accessing the hw queues. Another slot is needed
  *         for setting the link up or down. Secondary processes do not need
@@ -301,6 +305,9 @@ struct nfp_pcie_user {
  *         supported. Due to this requirement and future extensions requiring
  *         new slots per process, only one secondary process is supported by
  *         now.
+ *         For Flower firmware:
+ *         NFP PMD need another fixed slots, used as the configureation BAR
+ *         for ctrl vNIC.
  */
 static int
 nfp_enable_bars(struct nfp_pcie_user *nfp)
@@ -309,11 +316,11 @@ struct nfp_pcie_user {
        int x, start, end;
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               start = 4;
-               end = 1;
+               start = NFP_BAR_MID;
+               end = NFP_BAR_MIN;
        } else {
-               start = 7;
-               end = 4;
+               start = NFP_BAR_MAX;
+               end = NFP_BAR_MID;
        }
        for (x = start; x > end; x--) {
                bar = &nfp->bar[x - 1];
@@ -341,11 +348,11 @@ struct nfp_pcie_user {
        int x, start, end;
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               start = 4;
-               end = 1;
+               start = NFP_BAR_MID;
+               end = NFP_BAR_MIN;
        } else {
-               start = 7;
-               end = 4;
+               start = NFP_BAR_MAX;
+               end = NFP_BAR_MID;
        }
        for (x = start; x > end; x--) {
                bar = &nfp->bar[x - 1];
@@ -364,11 +371,11 @@ struct nfp_pcie_user {
        int x, start, end;
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-               start = 4;
-               end = 1;
+               start = NFP_BAR_MID;
+               end = NFP_BAR_MIN;
        } else {
-               start = 7;
-               end = 4;
+               start = NFP_BAR_MAX;
+               end = NFP_BAR_MID;
        }
 
        for (x = start; x > end; x--) {
-- 
1.8.3.1
