The rte_eth_dev_close() function now handles freeing per-device
resources (e.g., mac_addrs).  To conform with the new close()
behaviour, we set the RTE_ETH_DEV_CLOSE_REMOVE flag so that
rte_eth_dev_close() releases all device-level dynamic memory.
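
For illustration only (not part of this patch; names are hypothetical),
application teardown with the flag in place reduces to a stop/close
pair, since close() now also releases the port's dynamic resources:

    #include <rte_ethdev.h>

    /* Hypothetical application-side teardown: with
     * RTE_ETH_DEV_CLOSE_REMOVE set by the PMD, rte_eth_dev_close()
     * also frees device-level memory such as mac_addrs, so no extra
     * cleanup call is needed here.
     */
    static void
    example_port_teardown(uint16_t port_id)
    {
            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);
    }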

Second-level memory allocated to each individual rx/tx queue is now
freed as part of the close() operation, making it safe for
rte_eth_dev_close() to free the device private data without orphaning
the rx/tx queue pointers.
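
As a sketch of the setup/release pairing involved (illustrative only,
not the AVP code; names are hypothetical): second-level queue memory
allocated at queue setup time must now be released no later than the
close() callback, otherwise the rx_queues[]/tx_queues[] pointers would
be left dangling once dev_private is freed:

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    /* Hypothetical per-queue context allocated by the PMD. */
    struct example_queue {
            uint16_t queue_id;
    };

    /* Setup allocates second-level memory and stores the pointer in
     * dev->data->rx_queues[]; the matching rte_free() now happens in
     * the queue release helpers invoked from close().
     */
    static int
    example_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
                           unsigned int socket_id)
    {
            struct example_queue *q;

            q = rte_zmalloc_socket("example rxq", sizeof(*q),
                                   RTE_CACHE_LINE_SIZE, socket_id);
            if (q == NULL)
                    return -ENOMEM;
            q->queue_id = qid;
            dev->data->rx_queues[qid] = q;
            return 0;
    }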

Cc: Matt Peters <matt.pet...@windriver.com>
Signed-off-by: Allain Legacy <allain.leg...@windriver.com>
---
 drivers/net/avp/avp_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 09388d05f..0f7481c86 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -959,6 +959,8 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->dev_ops = &avp_eth_dev_ops;
        eth_dev->rx_pkt_burst = &avp_recv_pkts;
        eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+       /* Let rte_eth_dev_close() release the port resources */
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                /*
@@ -1940,8 +1942,25 @@ avp_dev_rx_queue_release(void *rx_queue)
        unsigned int i;
 
        for (i = 0; i < avp->num_rx_queues; i++) {
-               if (data->rx_queues[i] == rxq)
+               if (data->rx_queues[i] == rxq) {
+                       rte_free(data->rx_queues[i]);
                        data->rx_queues[i] = NULL;
+               }
+       }
+}
+
+static void
+avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+       struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct rte_eth_dev_data *data = avp->dev_data;
+       unsigned int i;
+
+       for (i = 0; i < avp->num_rx_queues; i++) {
+               if (data->rx_queues[i]) {
+                       rte_free(data->rx_queues[i]);
+                       data->rx_queues[i] = NULL;
+               }
        }
 }
 
@@ -1954,8 +1973,25 @@ avp_dev_tx_queue_release(void *tx_queue)
        unsigned int i;
 
        for (i = 0; i < avp->num_tx_queues; i++) {
-               if (data->tx_queues[i] == txq)
+               if (data->tx_queues[i] == txq) {
+                       rte_free(data->tx_queues[i]);
                        data->tx_queues[i] = NULL;
+               }
+       }
+}
+
+static void
+avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+       struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct rte_eth_dev_data *data = avp->dev_data;
+       unsigned int i;
+
+       for (i = 0; i < avp->num_tx_queues; i++) {
+               if (data->tx_queues[i]) {
+                       rte_free(data->tx_queues[i]);
+                       data->tx_queues[i] = NULL;
+               }
        }
 }
 
@@ -2104,6 +2140,10 @@ avp_dev_close(struct rte_eth_dev *eth_dev)
                /* continue */
        }
 
+       /* release dynamic storage for rx/tx queues */
+       avp_dev_rx_queue_release_all(eth_dev);
+       avp_dev_tx_queue_release_all(eth_dev);
+
 unlock:
        rte_spinlock_unlock(&avp->lock);
 }
-- 
2.12.1
