Fix the resource leak in the exit logic of the flower firmware
application.

The close callback of the PF vNIC freed the PF device resources
directly but leaked the ctrl vNIC resources, the flow private data and
the representor structures. Move the close logic into the representor
layer: each representor frees its own resources when it is closed, and
the application firmware and PF resources are released only once the
last representor (PF, physical port or VF) has been closed.
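
For illustration only (the helper below is hypothetical and not part
of this patch): an application that closes all of its ethdev ports
ends up in the representor close path added here, and the PF-level
resources are released by whichever representor happens to be closed
last.

    #include <rte_ethdev.h>

    /*
     * Hypothetical helper: close every ethdev port. For NFP flower
     * representors this calls into nfp_flower_repr_dev_close(); the
     * application firmware and PF resources are freed only once the
     * last representor has been closed.
     */
    static void
    close_all_ports(void)
    {
            uint16_t port_id;

            RTE_ETH_FOREACH_DEV(port_id)
                    rte_eth_dev_close(port_id);
    }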

Fixes: e1124c4f8a45 ("net/nfp: add flower representor framework")
Cc: sta...@dpdk.org

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Long Wu <long...@corigine.com>
Reviewed-by: Peng Zhang <peng.zh...@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c           | 73 ++++---------------
 drivers/net/nfp/flower/nfp_flower.h           |  1 +
 .../net/nfp/flower/nfp_flower_representor.c   | 64 ++++++++++++++++
 3 files changed, 80 insertions(+), 58 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 6b523d98b0..3698a3d4aa 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -82,63 +82,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
        return 0;
 }
 
-/* Reset and stop device. The device can not be restarted. */
-static int
-nfp_flower_pf_close(struct rte_eth_dev *dev)
-{
-       uint16_t i;
-       struct nfp_net_hw *hw;
-       struct nfp_pf_dev *pf_dev;
-       struct nfp_net_txq *this_tx_q;
-       struct nfp_net_rxq *this_rx_q;
-       struct nfp_flower_representor *repr;
-       struct nfp_app_fw_flower *app_fw_flower;
-
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return 0;
-
-       repr = dev->data->dev_private;
-       hw = repr->app_fw_flower->pf_hw;
-       pf_dev = hw->pf_dev;
-       app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
-
-       nfp_mtr_priv_uninit(pf_dev);
-
-       /*
-        * We assume that the DPDK application is stopping all the
-        * threads/queues before calling the device close function.
-        */
-       nfp_net_disable_queues(dev);
-
-       /* Clear queues */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               this_tx_q = dev->data->tx_queues[i];
-               nfp_net_reset_tx_queue(this_tx_q);
-       }
-
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               this_rx_q = dev->data->rx_queues[i];
-               nfp_net_reset_rx_queue(this_rx_q);
-       }
-
-       /* Cancel possible impending LSC work here before releasing the port */
-       rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
-
-       nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
-
-       /* Now it is safe to free all PF resources */
-       PMD_DRV_LOG(INFO, "Freeing PF resources");
-       nfp_cpp_area_free(pf_dev->ctrl_area);
-       nfp_cpp_area_free(pf_dev->qc_area);
-       free(pf_dev->hwinfo);
-       free(pf_dev->sym_tbl);
-       nfp_cpp_free(pf_dev->cpp);
-       rte_free(app_fw_flower);
-       rte_free(pf_dev);
-
-       return 0;
-}
-
 static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
        .dev_infos_get          = nfp_net_infos_get,
        .link_update            = nfp_net_link_update,
@@ -146,7 +89,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
 
        .dev_start              = nfp_flower_pf_start,
        .dev_stop               = nfp_net_stop,
-       .dev_close              = nfp_flower_pf_close,
 };
 
 static inline struct nfp_flower_representor *
@@ -858,6 +800,21 @@ nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
        return ret;
 }
 
+void
+nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev)
+{
+       struct nfp_app_fw_flower *app_fw_flower;
+
+       app_fw_flower = pf_dev->app_fw_priv;
+       nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
+       nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area);
+       nfp_cpp_area_free(pf_dev->ctrl_area);
+       rte_free(app_fw_flower->pf_hw);
+       nfp_mtr_priv_uninit(pf_dev);
+       nfp_flow_priv_uninit(pf_dev);
+       rte_free(app_fw_flower);
+}
+
 int
 nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
 {
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index 6f27c06acc..8393de66c5 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -106,6 +106,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower)
 
 int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
                const struct nfp_dev_info *dev_info);
+void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev);
 int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
 bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
                struct rte_mbuf *mbuf,
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 7212d9e024..02089d390e 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -328,12 +328,75 @@ nfp_flower_repr_free(struct nfp_flower_representor *repr,
        }
 }
 
+/* Reset and stop device. The device can not be restarted. */
+static int
+nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
+{
+       uint16_t i;
+       struct nfp_net_hw *hw;
+       struct nfp_pf_dev *pf_dev;
+       struct nfp_net_txq *this_tx_q;
+       struct nfp_net_rxq *this_rx_q;
+       struct nfp_flower_representor *repr;
+       struct nfp_app_fw_flower *app_fw_flower;
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       repr = dev->data->dev_private;
+       app_fw_flower = repr->app_fw_flower;
+       hw = app_fw_flower->pf_hw;
+       pf_dev = hw->pf_dev;
+
+       /*
+        * We assume that the DPDK application is stopping all the
+        * threads/queues before calling the device close function.
+        */
+       nfp_net_disable_queues(dev);
+
+       /* Clear queues */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               this_tx_q = dev->data->tx_queues[i];
+               nfp_net_reset_tx_queue(this_tx_q);
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               this_rx_q = dev->data->rx_queues[i];
+               nfp_net_reset_rx_queue(this_rx_q);
+       }
+
+       if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC)
+               return -EINVAL;
+
+       nfp_flower_repr_free(repr, repr->repr_type);
+
+       for (i = 0; i < MAX_FLOWER_VFS; i++) {
+               if (app_fw_flower->vf_reprs[i] != NULL)
+                       return 0;
+       }
+
+       for (i = 0; i < NFP_MAX_PHYPORTS; i++) {
+               if (app_fw_flower->phy_reprs[i] != NULL)
+                       return 0;
+       }
+
+       if (app_fw_flower->pf_repr != NULL)
+               return 0;
+
+       /* Now it is safe to free all PF resources */
+       nfp_uninit_app_fw_flower(pf_dev);
+       nfp_pf_uninit(pf_dev);
+
+       return 0;
+}
+
 static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
        .dev_infos_get        = nfp_flower_repr_dev_infos_get,
 
        .dev_start            = nfp_flower_pf_start,
        .dev_configure        = nfp_net_configure,
        .dev_stop             = nfp_net_stop,
+       .dev_close            = nfp_flower_repr_dev_close,
 
        .rx_queue_setup       = nfp_net_rx_queue_setup,
        .tx_queue_setup       = nfp_net_tx_queue_setup,
@@ -356,6 +419,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
        .dev_start            = nfp_flower_repr_dev_start,
        .dev_configure        = nfp_net_configure,
        .dev_stop             = nfp_flower_repr_dev_stop,
+       .dev_close            = nfp_flower_repr_dev_close,
 
        .rx_queue_setup       = nfp_flower_repr_rx_queue_setup,
        .tx_queue_setup       = nfp_flower_repr_tx_queue_setup,
-- 
2.39.1
