From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Provide only burst enqueue and dequeue operations: the single-event
enqueue and dequeue handlers are removed and their logic is folded into
the burst variants.
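
For context only (an illustrative sketch, not part of this patch):
applications reach these handlers through the public burst API, so a
caller that previously relied on the single-event path keeps the same
behaviour by passing nb_events = 1. The helper names enqueue_one and
dequeue_one below are hypothetical; dev_id and port_id are assumed to be
an already configured event device and port:

  #include <rte_eventdev.h>

  /* Enqueue a single event through the burst API; returns 1 if it was accepted. */
  static inline uint16_t
  enqueue_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
  {
          return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
  }

  /* Dequeue a single event through the burst API; returns 1 if an event was received. */
  static inline uint16_t
  dequeue_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev,
              uint64_t timeout_ticks)
  {
          return rte_event_dequeue_burst(dev_id, port_id, ev, 1, timeout_ticks);
  }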

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>

--

RFC v3:
 * Rewrite. Pavan Nikhilesh is now the author.
---
 drivers/event/cnxk/cn10k_eventdev.c | 74 ++---------------------------
 drivers/event/cnxk/cn10k_worker.c   | 49 +++++++++----------
 drivers/event/cnxk/cn10k_worker.h   |  1 -
 drivers/event/cnxk/cn9k_eventdev.c  | 73 +---------------------------
 drivers/event/cnxk/cn9k_worker.c    | 26 ++++------
 drivers/event/cnxk/cn9k_worker.h    |  3 --
 6 files changed, 36 insertions(+), 190 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 531c489172..a099356562 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -311,11 +311,6 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
 {
 #if !defined(CNXK_DIS_TMPLT_FUNC)
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-       const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
 
        const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
@@ -323,86 +318,42 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
-
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_reas_deq[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_reas_deq_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_reas_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_reas_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_reas_deq_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_##name,
-
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_reas_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_reas_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_reas_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
@@ -424,48 +375,33 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
 
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                if (dev->rx_offloads & NIX_RX_REAS_F) {
-                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_reas_deq_seg_burst);
-                       if (dev->is_timeout_deq) {
-                               CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                                      sso_hws_reas_deq_tmo_seg);
+                       if (dev->is_timeout_deq)
                                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                                       sso_hws_reas_deq_tmo_seg_burst);
-                       }
                } else {
-                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_seg_burst);
 
-                       if (dev->is_timeout_deq) {
-                               CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                                      sso_hws_deq_tmo_seg);
+                       if (dev->is_timeout_deq)
                                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                                       sso_hws_deq_tmo_seg_burst);
-                       }
                }
        } else {
                if (dev->rx_offloads & NIX_RX_REAS_F) {
-                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_reas_deq_burst);
 
-                       if (dev->is_timeout_deq) {
-                               CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                                      sso_hws_reas_deq_tmo);
+                       if (dev->is_timeout_deq)
                                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                                       sso_hws_reas_deq_tmo_burst);
-                       }
                } else {
-                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
+                       CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_burst);
 
-                       if (dev->is_timeout_deq) {
-                               CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_tmo);
+                       if (dev->is_timeout_deq)
                                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                                       sso_hws_deq_tmo_burst);
-                       }
                }
        }
 
@@ -514,7 +450,6 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
        cn10k_sso_fp_blk_fns_set(event_dev);
        cn10k_sso_fp_tmplt_fns_set(event_dev);
 
-       event_dev->enqueue = cn10k_sso_hws_enq;
        event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
@@ -848,7 +783,6 @@ eventdev_fops_update(struct rte_eventdev *event_dev)
        struct rte_event_fp_ops *fp_op =
                rte_event_fp_ops + event_dev->data->dev_id;
 
-       fp_op->dequeue = event_dev->dequeue;
        fp_op->dequeue_burst = event_dev->dequeue_burst;
 }
 
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index a0e85face1..c49138316c 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -107,32 +107,6 @@ sso_lmt_aw_wait_fc(struct cn10k_sso_hws *ws, int64_t req)
        }
 }
 
-uint16_t __rte_hot
-cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
-{
-       struct cn10k_sso_hws *ws = port;
-
-       switch (ev->op) {
-       case RTE_EVENT_OP_NEW:
-               return cn10k_sso_hws_new_event(ws, ev);
-       case RTE_EVENT_OP_FORWARD:
-               cn10k_sso_hws_forward_event(ws, ev);
-               break;
-       case RTE_EVENT_OP_RELEASE:
-               if (ws->swtag_req) {
-                       cnxk_sso_hws_desched(ev->u64, ws->base);
-                       ws->swtag_req = 0;
-                       break;
-               }
-               cnxk_sso_hws_swtag_flush(ws->base);
-               break;
-       default:
-               return 0;
-       }
-
-       return 1;
-}
-
 #define VECTOR_SIZE_BITS            0xFFFFFFFFFFF80000ULL
 #define VECTOR_GET_LINE_OFFSET(line) (19 + (3 * line))
 
@@ -384,8 +358,29 @@ uint16_t __rte_hot
 cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
                        uint16_t nb_events)
 {
+       struct cn10k_sso_hws *ws = port;
+
        RTE_SET_USED(nb_events);
-       return cn10k_sso_hws_enq(port, ev);
+
+       switch (ev->op) {
+       case RTE_EVENT_OP_NEW:
+               return cn10k_sso_hws_new_event(ws, ev);
+       case RTE_EVENT_OP_FORWARD:
+               cn10k_sso_hws_forward_event(ws, ev);
+               break;
+       case RTE_EVENT_OP_RELEASE:
+               if (ws->swtag_req) {
+                       cnxk_sso_hws_desched(ev->u64, ws->base);
+                       ws->swtag_req = 0;
+                       break;
+               }
+               cnxk_sso_hws_swtag_flush(ws->base);
+               break;
+       default:
+               return 0;
+       }
+
+       return 1;
 }
 
 uint16_t __rte_hot
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 4785cc6575..f0bfa12640 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -366,7 +366,6 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev,
 }
 
 /* CN10K Fastpath functions. */
-uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
 uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
                                           const struct rte_event ev[],
                                           uint16_t nb_events);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 377e910837..431e1670d5 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -314,48 +314,24 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
 #if !defined(CNXK_DIS_TMPLT_FUNC)
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
-       const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
@@ -363,48 +339,24 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
        };
 
        /* Dual WS modes */
-       const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
 #undef R
        };
 
-       const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
-#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
-               NIX_RX_FASTPATH_MODES
-#undef R
-       };
-
        const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
@@ -436,31 +388,22 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
 #undef T
        };
 
-       event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        event_dev->profile_switch = cn9k_sso_hws_profile_switch;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-               CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
-               if (dev->is_timeout_deq) {
-                       CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                             sso_hws_deq_tmo_seg);
+               if (dev->is_timeout_deq)
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
-               }
        } else {
-               CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
-               if (dev->is_timeout_deq) {
-                       CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                             sso_hws_deq_tmo);
+               if (dev->is_timeout_deq)
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
-               }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
        event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
@@ -473,7 +416,6 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
                                      sso_hws_tx_adptr_enq);
 
        if (dev->dual_ws) {
-               event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
@@ -483,25 +425,17 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev)
                event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch;
 
                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-                       CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                             sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
-                               CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                                     sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                } else {
-                       CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                             sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
-                               CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
-                                                     sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
@@ -573,7 +507,6 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
        cn9k_sso_fp_blk_fns_set(event_dev);
        cn9k_sso_fp_tmplt_fns_set(event_dev);
 
-       event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
@@ -581,7 +514,6 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
        event_dev->profile_switch = cn9k_sso_hws_profile_switch;
 
        if (dev->dual_ws) {
-               event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst = 
cn9k_sso_hws_dual_enq_fwd_burst;
@@ -917,7 +849,6 @@ eventdev_fops_tstamp_update(struct rte_eventdev *event_dev)
        struct rte_event_fp_ops *fp_op =
                rte_event_fp_ops + event_dev->data->dev_id;
 
-       fp_op->dequeue = event_dev->dequeue;
        fp_op->dequeue_burst = event_dev->dequeue_burst;
 }
 
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index a9ac49a5a7..86aa3f1c30 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -8,10 +8,13 @@
 #include "cn9k_cryptodev_ops.h"
 
 uint16_t __rte_hot
-cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
+cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
+                      uint16_t nb_events)
 {
        struct cn9k_sso_hws *ws = port;
 
+       RTE_SET_USED(nb_events);
+
        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
                return cn9k_sso_hws_new_event(ws, ev);
@@ -33,14 +36,6 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
        return 1;
 }
 
-uint16_t __rte_hot
-cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
-                      uint16_t nb_events)
-{
-       RTE_SET_USED(nb_events);
-       return cn9k_sso_hws_enq(port, ev);
-}
-
 uint16_t __rte_hot
 cn9k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
                           uint16_t nb_events)
@@ -80,11 +75,14 @@ cn9k_sso_hws_profile_switch(void *port, uint8_t profile)
 /* Dual ws ops. */
 
 uint16_t __rte_hot
-cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
+cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[],
+                           uint16_t nb_events)
 {
        struct cn9k_sso_hws_dual *dws = port;
        uint64_t base;
 
+       RTE_SET_USED(nb_events);
+
        base = dws->base[!dws->vws];
        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
@@ -107,14 +105,6 @@ cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
        return 1;
 }
 
-uint16_t __rte_hot
-cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[],
-                           uint16_t nb_events)
-{
-       RTE_SET_USED(nb_events);
-       return cn9k_sso_hws_dual_enq(port, ev);
-}
-
 uint16_t __rte_hot
 cn9k_sso_hws_dual_enq_new_burst(void *port, const struct rte_event ev[],
                                uint16_t nb_events)
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index c92fa72f11..635c2f6e9a 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -359,7 +359,6 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
 }
 
 /* CN9K Fastpath functions. */
-uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
 uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
                                          const struct rte_event ev[],
                                          uint16_t nb_events);
@@ -371,8 +370,6 @@ uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
                                              uint16_t nb_events);
 int __rte_hot cn9k_sso_hws_profile_switch(void *port, uint8_t profile);
 
-uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
-                                        const struct rte_event *ev);
 uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
-- 
2.43.0
