Event ports are configured to implicitly release the scheduling contexts
they currently hold when the next call to rte_event_dequeue_burst() is
made.
A worker core might still hold a scheduling context on exit, since the
next call to rte_event_dequeue_burst() is never made.
Depending on the worker exit timing, this might lead to a deadlock,
especially when the number of flows is small.

Add a cleanup step that releases any scheduling contexts still held by
the worker, using RTE_EVENT_OP_RELEASE.
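
For context, the worker exit path after this change looks roughly like
the sketch below (illustrative only; "links", "force_quit" and the
single-event loop are simplified stand-ins for the actual worker code):

	struct rte_event ev = {0};

	while (!force_quit) {
		/* Dequeueing implicitly releases the scheduling context
		 * held from the previous iteration. */
		if (!rte_event_dequeue_burst(links[0].eventdev_id,
					     links[0].event_port_id,
					     &ev, 1, 0))
			continue;

		/* ... process ev and enqueue/transmit it ... */
	}

	/* On exit no further dequeue happens, so explicitly release the
	 * scheduling context still attached to the last event, if any. */
	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}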

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426c5c..3df5acf384 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -749,7 +749,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
 {
        struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
-       unsigned int nb_rx = 0;
+       unsigned int nb_rx = 0, nb_tx;
        struct rte_mbuf *pkt;
        struct rte_event ev;
        uint32_t lcore_id;
@@ -847,11 +847,19 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                 * directly enqueued to the adapter and it would be
                 * internally submitted to the eth device.
                 */
-               rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
-                               links[0].event_port_id,
-                               &ev,    /* events */
-                               1,      /* nb_events */
-                               0       /* flags */);
+               nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+                                                        links[0].event_port_id,
+                                                        &ev, /* events */
+                                                        1,   /* nb_events */
+                                                        0 /* flags */);
+               if (!nb_tx)
+                       rte_pktmbuf_free(ev.mbuf);
+       }
+
+       if (ev.u64) {
+               ev.op = RTE_EVENT_OP_RELEASE;
+               rte_event_enqueue_burst(links[0].eventdev_id,
+                                       links[0].event_port_id, &ev, 1);
        }
 }
 
@@ -864,7 +872,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
 {
        struct lcore_conf_ev_tx_int_port_wrkr lconf;
-       unsigned int nb_rx = 0;
+       unsigned int nb_rx = 0, nb_tx;
        struct rte_event ev;
        uint32_t lcore_id;
        int32_t socket_id;
@@ -952,11 +960,19 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
                 * directly enqueued to the adapter and it would be
                 * internally submitted to the eth device.
                 */
-               rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
-                               links[0].event_port_id,
-                               &ev,    /* events */
-                               1,      /* nb_events */
-                               0       /* flags */);
+               nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+                                                        links[0].event_port_id,
+                                                        &ev, /* events */
+                                                        1,   /* nb_events */
+                                                        0 /* flags */);
+               if (!nb_tx)
+                       rte_pktmbuf_free(ev.mbuf);
+       }
+
+       if (ev.u64) {
+               ev.op = RTE_EVENT_OP_RELEASE;
+               rte_event_enqueue_burst(links[0].eventdev_id,
+                                       links[0].event_port_id, &ev, 1);
        }
 }
 
-- 
2.25.1
