From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Add Event vector support for CN20K Rx/Tx adapter.
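
On Rx, packets are aggregated into RTE_EVENT_TYPE_ETHDEV_VECTOR events
using the SSO aggregation queue (AGQ), and the supported vector
size/timeout ranges are reported through the Rx adapter vector-limits
op. On Tx, vector events are transmitted directly, splitting the
vector whenever consecutive mbufs target different ports or queues.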

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 drivers/event/cnxk/cn20k_eventdev.c      | 185 ++++++++++++++++++++++-
 drivers/event/cnxk/cn20k_tx_worker.h     |  84 ++++++++++
 drivers/event/cnxk/cn20k_worker.h        |  63 ++++++++
 drivers/event/cnxk/cnxk_eventdev.h       |   3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  16 +-
 5 files changed, 340 insertions(+), 11 deletions(-)
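
Notes: below is a minimal, illustrative sketch (not part of this patch)
of how an application could enable Rx event vectorization once this
support is in place. It assumes eventdev 0, ethdev port 0 and Rx
adapter 0 are already configured, and that vec_pool was created with
rte_event_vector_pool_create(); only public rte_event_eth_rx_adapter
APIs are used.

	#include <string.h>
	#include <rte_mempool.h>
	#include <rte_event_eth_rx_adapter.h>

	static int
	enable_rx_event_vectorization(struct rte_mempool *vec_pool)
	{
		struct rte_event_eth_rx_adapter_vector_limits limits;
		struct rte_event_eth_rx_adapter_queue_conf conf;
		int rc;

		/* cn20k reports these through the vector-limits op
		 * added in this patch.
		 */
		rc = rte_event_eth_rx_adapter_vector_limits_get(0, 0, &limits);
		if (rc)
			return rc;

		memset(&conf, 0, sizeof(conf));
		conf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
		conf.ev.queue_id = 0;
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		/* limits.log2_sz is set, so the size must be a power of two. */
		conf.vector_sz = limits.max_sz;
		conf.vector_timeout_ns = limits.min_timeout_ns;
		conf.vector_mp = vec_pool;

		/* rx_queue_id of -1 applies the config to all Rx queues. */
		return rte_event_eth_rx_adapter_queue_add(0, 0, -1, &conf);
	}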

diff --git a/drivers/event/cnxk/cn20k_eventdev.c b/drivers/event/cnxk/cn20k_eventdev.c
index 3c95247499..e525ab2a9c 100644
--- a/drivers/event/cnxk/cn20k_eventdev.c
+++ b/drivers/event/cnxk/cn20k_eventdev.c
@@ -74,6 +74,7 @@ cn20k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
        ws->xaq_lmt = dev->xaq_lmt;
        ws->fc_cache_space = (int64_t __rte_atomic *)dev->fc_cache_space;
        ws->aw_lmt = dev->sso.lmt_base;
+       ws->lmt_base = dev->sso.lmt_base;
 
        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns);
@@ -594,7 +595,8 @@ cn20k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
-                       RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+                       RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
+                       RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;
 
        return 0;
 }
@@ -640,6 +642,156 @@ cn20k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en)
        eventdev_fops_tstamp_update(event_dev);
 }
 
+static int
+cn20k_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id, uint16_t port_id,
+                    const struct rte_event_eth_rx_adapter_queue_conf *queue_conf, int agq)
+{
+       struct roc_nix_rq *rq;
+       uint32_t tag_mask;
+       uint16_t wqe_skip;
+       uint8_t tt;
+       int rc;
+
+       rq = &cnxk_eth_dev->rqs[rq_id];
+       if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+               tag_mask = agq;
+               tt = SSO_TT_AGG;
+               rq->flow_tag_width = 0;
+       } else {
+               tag_mask = (port_id & 0xFF) << 20;
+               tag_mask |= (RTE_EVENT_TYPE_ETHDEV << 28);
+               tt = queue_conf->ev.sched_type;
+               rq->flow_tag_width = 20;
+               if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
+                       rq->flow_tag_width = 0;
+                       tag_mask |= queue_conf->ev.flow_id;
+               }
+       }
+
+       rq->tag_mask = tag_mask;
+       rq->sso_ena = 1;
+       rq->tt = tt;
+       rq->hwgrp = queue_conf->ev.queue_id;
+       wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
+       wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
+       rq->wqe_skip = wqe_skip;
+
+       rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+       return rc;
+}
+
+static int
+cn20k_sso_rx_adapter_vwqe_enable(struct cnxk_sso_evdev *dev, uint16_t port_id, uint16_t rq_id,
+                                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       uint32_t agq, tag_mask, stag_mask;
+       struct roc_sso_agq_data data;
+       int rc;
+
+       tag_mask = (port_id & 0xff) << 20;
+       if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)
+               tag_mask |= queue_conf->ev.flow_id;
+       else
+               tag_mask |= rq_id;
+
+       stag_mask = tag_mask;
+       tag_mask |= RTE_EVENT_TYPE_ETHDEV_VECTOR << 28;
+       stag_mask |= RTE_EVENT_TYPE_ETHDEV << 28;
+
+       memset(&data, 0, sizeof(struct roc_sso_agq_data));
+       data.tag = tag_mask;
+       data.tt = queue_conf->ev.sched_type;
+       data.stag = stag_mask;
+       data.vwqe_aura = roc_npa_aura_handle_to_aura(queue_conf->vector_mp->pool_id);
+       data.vwqe_max_sz_exp = rte_log2_u32(queue_conf->vector_sz);
+       data.vwqe_wait_tmo = queue_conf->vector_timeout_ns / ((SSO_AGGR_DEF_TMO + 1) * 100);
+       data.xqe_type = 0;
+
+       rc = roc_sso_hwgrp_agq_alloc(&dev->sso, queue_conf->ev.queue_id, &data);
+       if (rc < 0)
+               return rc;
+
+       agq = roc_sso_hwgrp_agq_from_tag(&dev->sso, queue_conf->ev.queue_id, tag_mask, 0);
+       return agq;
+}
+
+static int
+cn20k_rx_adapter_queue_add(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+                          int32_t rx_queue_id,
+                          const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint16_t port = eth_dev->data->port_id;
+       struct cnxk_eth_rxq_sp *rxq_sp;
+       int i, rc = 0, agq = 0;
+
+       if (rx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+                       rc |= cn20k_rx_adapter_queue_add(event_dev, eth_dev, i, queue_conf);
+       } else {
+               rxq_sp = cnxk_eth_rxq_to_sp(eth_dev->data->rx_queues[rx_queue_id]);
+               cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
+               rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+               if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+                       cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
+                                             RTE_EVENT_TYPE_ETHDEV_VECTOR);
+                       rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+                       if (rc < 0)
+                               return rc;
+
+                       rc = cn20k_sso_rx_adapter_vwqe_enable(dev, port, rx_queue_id, queue_conf);
+                       if (rc < 0)
+                               return rc;
+                       agq = rc;
+               }
+
+               rc = cn20k_sso_rxq_enable(cnxk_eth_dev, (uint16_t)rx_queue_id, port, queue_conf,
+                                         agq);
+
+               /* Propagate force bp devarg */
+               cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
+               cnxk_sso_tstamp_cfg(port, eth_dev, dev);
+               cnxk_eth_dev->nb_rxq_sso++;
+       }
+
+       if (rc < 0) {
+               plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
+                       queue_conf->ev.queue_id);
+               return rc;
+       }
+
+       dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
+       return 0;
+}
+
+static int
+cn20k_rx_adapter_queue_del(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+                          int32_t rx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       struct roc_nix_rq *rxq;
+       int i, rc = 0;
+
+       RTE_SET_USED(event_dev);
+       if (rx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+                       cn20k_rx_adapter_queue_del(event_dev, eth_dev, i);
+       } else {
+               rxq = &cnxk_eth_dev->rqs[rx_queue_id];
+               if (rxq->tt == SSO_TT_AGG)
+                       roc_sso_hwgrp_agq_free(&dev->sso, rxq->hwgrp, rxq->tag_mask);
+               rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id);
+               cnxk_eth_dev->nb_rxq_sso--;
+       }
+
+       if (rc < 0)
+               plt_err("Failed to clear Rx adapter config port=%d, q=%d", eth_dev->data->port_id,
+                       rx_queue_id);
+       return rc;
+}
+
 static int
 cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
@@ -656,7 +808,7 @@ cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
        if (rc)
                return -EINVAL;
 
-       rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id, queue_conf);
+       rc = cn20k_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id, queue_conf);
        if (rc)
                return -EINVAL;
 
@@ -689,7 +841,29 @@ cn20k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
        if (rc)
                return -EINVAL;
 
-       return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+       return cn20k_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
+static int
+cn20k_sso_rx_adapter_vector_limits(const struct rte_eventdev *dev,
+                                  const struct rte_eth_dev *eth_dev,
+                                  struct rte_event_eth_rx_adapter_vector_limits *limits)
+{
+       int ret;
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(eth_dev);
+       ret = strncmp(eth_dev->device->driver->name, "net_cn20k", 8);
+       if (ret)
+               return -ENOTSUP;
+
+       limits->log2_sz = true;
+       limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
+       limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
+       limits->min_timeout_ns = (SSO_AGGR_DEF_TMO + 1) * 100;
+       limits->max_timeout_ns = (BITMASK_ULL(11, 0) + 1) * limits->min_timeout_ns;
+
+       return 0;
 }
 
 static int
@@ -703,7 +877,8 @@ cn20k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev, const struct rte_e
        if (ret)
                *caps = 0;
        else
-               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
+                       RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
 
        return 0;
 }
@@ -806,6 +981,8 @@ static struct eventdev_ops cn20k_sso_dev_ops = {
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
 
+       .eth_rx_adapter_vector_limits_get = cn20k_sso_rx_adapter_vector_limits,
+
        .eth_tx_adapter_caps_get = cn20k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn20k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn20k_sso_tx_adapter_queue_del,
diff --git a/drivers/event/cnxk/cn20k_tx_worker.h b/drivers/event/cnxk/cn20k_tx_worker.h
index c8ab560b0e..b09d845b09 100644
--- a/drivers/event/cnxk/cn20k_tx_worker.h
+++ b/drivers/event/cnxk/cn20k_tx_worker.h
@@ -139,10 +139,58 @@ cn20k_sso_tx_one(struct cn20k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, ui
        return 1;
 }
 
+static __rte_always_inline uint16_t
+cn20k_sso_vwqe_split_tx(struct cn20k_sso_hws *ws, struct rte_mbuf **mbufs, uint16_t nb_mbufs,
+                       uint64_t *cmd, const uint64_t *txq_data, const uint32_t flags)
+{
+       uint16_t count = 0, port, queue, ret = 0, last_idx = 0;
+       struct cn20k_eth_txq *txq;
+       int32_t space;
+       int i;
+
+       port = mbufs[0]->port;
+       queue = rte_event_eth_tx_adapter_txq_get(mbufs[0]);
+       for (i = 0; i < nb_mbufs; i++) {
+               if (port != mbufs[i]->port || queue != rte_event_eth_tx_adapter_txq_get(mbufs[i])) {
+                       if (count) {
+                               txq = (struct cn20k_eth_txq
+                                              *)(txq_data[(txq_data[port] >> 48) + queue] &
+                                                 (BIT_ULL(48) - 1));
+                               /* Transmit based on queue depth */
+                               space = cn20k_sso_sq_depth(txq);
+                               if (space < count)
+                                       goto done;
+                               cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, &mbufs[last_idx],
+                                                          count, cmd, flags | NIX_TX_VWQE_F);
+                               ret += count;
+                               count = 0;
+                       }
+                       port = mbufs[i]->port;
+                       queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+                       last_idx = i;
+               }
+               count++;
+       }
+       if (count) {
+               txq = (struct cn20k_eth_txq *)(txq_data[(txq_data[port] >> 48) + queue] &
+                                              (BIT_ULL(48) - 1));
+               /* Transmit based on queue depth */
+               space = cn20k_sso_sq_depth(txq);
+               if (space < count)
+                       goto done;
+               cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, &mbufs[last_idx], count, cmd,
+                                          flags | NIX_TX_VWQE_F);
+               ret += count;
+       }
+done:
+       return ret;
+}
+
 static __rte_always_inline uint16_t
 cn20k_sso_hws_event_tx(struct cn20k_sso_hws *ws, struct rte_event *ev, uint64_t *cmd,
                       const uint64_t *txq_data, const uint32_t flags)
 {
+       struct cn20k_eth_txq *txq;
        struct rte_mbuf *m;
        uintptr_t lmt_addr;
        uint16_t lmt_id;
@@ -150,6 +198,42 @@ cn20k_sso_hws_event_tx(struct cn20k_sso_hws *ws, struct rte_event *ev, uint64_t
        lmt_addr = ws->lmt_base;
        ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
 
+       if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
+               struct rte_mbuf **mbufs = ev->vec->mbufs;
+               uint64_t meta = *(uint64_t *)ev->vec;
+               uint16_t offset, nb_pkts, left;
+               int32_t space;
+
+               nb_pkts = meta & 0xFFFF;
+               offset = (meta >> 16) & 0xFFF;
+               if (meta & BIT(31)) {
+                       txq = (struct cn20k_eth_txq
+                                      *)(txq_data[(txq_data[meta >> 32] >> 48) + (meta >> 48)] &
+                                         (BIT_ULL(48) - 1));
+
+                       /* Transmit based on queue depth */
+                       space = cn20k_sso_sq_depth(txq);
+                       if (space <= 0)
+                               return 0;
+                       nb_pkts = nb_pkts < space ? nb_pkts : (uint16_t)space;
+                       cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, mbufs + offset, nb_pkts,
+                                                  cmd, flags | NIX_TX_VWQE_F);
+               } else {
+                       nb_pkts = cn20k_sso_vwqe_split_tx(ws, mbufs + offset, nb_pkts, cmd,
+                                                         txq_data, flags);
+               }
+               left = (meta & 0xFFFF) - nb_pkts;
+
+               if (!left) {
+                       rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
+               } else {
+                       *(uint64_t *)ev->vec =
+                               (meta & ~0xFFFFFFFUL) | (((uint32_t)nb_pkts + offset) << 16) | left;
+               }
+               rte_prefetch0(ws);
+               return !left;
+       }
+
        m = ev->mbuf;
        return cn20k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data, flags);
 }
diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h
index 9075073fd2..5799e5cc49 100644
--- a/drivers/event/cnxk/cn20k_worker.h
+++ b/drivers/event/cnxk/cn20k_worker.h
@@ -41,6 +41,58 @@ cn20k_sso_process_tstamp(uint64_t u64, uint64_t mbuf, struct cnxk_timesync_info
        }
 }
 
+static __rte_always_inline void
+cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn20k_sso_hws *ws)
+{
+       uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+       struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+       void *lookup_mem = ws->lookup_mem;
+       uintptr_t lbase = ws->lmt_base;
+       struct rte_event_vector *vec;
+       uint16_t nb_mbufs, non_vec;
+       struct rte_mbuf **wqe;
+       struct rte_mbuf *mbuf;
+       uint64_t sa_base = 0;
+       uintptr_t cpth = 0;
+       int i;
+
+       mbuf_init |= ((uint64_t)port_id) << 48;
+       vec = (struct rte_event_vector *)vwqe;
+       wqe = vec->mbufs;
+
+       rte_prefetch0(&vec->ptrs[0]);
+#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *))
+       for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
+               rte_prefetch0(&vec->ptrs[i]);
+
+       if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+               mbuf_init |= 8;
+
+       nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
+       nb_mbufs = cn20k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs, flags | NIX_RX_VWQE_F,
+                                             lookup_mem, tstamp, lbase, 0);
+       wqe += nb_mbufs;
+       non_vec = vec->nb_elem - nb_mbufs;
+
+       while (non_vec) {
+               struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
+
+               mbuf = (struct rte_mbuf *)((char *)cqe - sizeof(struct rte_mbuf));
+
+               /* Mark mempool obj as "get" as it is alloc'ed by NIX */
+               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+
+               cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base,
+                                     flags);
+
+               if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+                       cn20k_sso_process_tstamp((uint64_t)wqe[0], (uint64_t)mbuf, tstamp);
+               wqe[0] = (struct rte_mbuf *)mbuf;
+               non_vec--;
+               wqe++;
+       }
+}
+
 static __rte_always_inline void
 cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32_t flags)
 {
@@ -65,6 +117,17 @@ cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32
                if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
                        cn20k_sso_process_tstamp(u64[1], mbuf, ws->tstamp[port]);
                u64[1] = mbuf;
+       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+               uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+               __uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
+
+               vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) | ((vwqe_hdr & 0xFFFF) << 48) |
+                          ((uint64_t)port << 32);
+               *(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
+               cn20k_process_vwqe(u64[1], port, flags, ws);
+               /* Mark vector mempool object as get */
+               RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]), (void **)&u64[1], 1,
+                                         1);
        }
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 4066497e6b..33b3538753 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -266,6 +266,9 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev);
 int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev);
+void cnxk_sso_tstamp_cfg(uint16_t port_id, const struct rte_eth_dev *eth_dev,
+                        struct cnxk_sso_evdev *dev);
+int cnxk_sso_rxq_disable(const struct rte_eth_dev *eth_dev, uint16_t rq_id);
 int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
                                  const struct rte_eth_dev *eth_dev,
                                  int32_t tx_queue_id);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3cac42111a..4cf48db74c 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -167,9 +167,10 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
        return rc;
 }
 
-static int
-cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
+int
+cnxk_sso_rxq_disable(const struct rte_eth_dev *eth_dev, uint16_t rq_id)
 {
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct roc_nix_rq *rq;
 
        rq = &cnxk_eth_dev->rqs[rq_id];
@@ -209,10 +210,11 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
 }
 
-static void
-cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev,
-                   struct cnxk_sso_evdev *dev)
+void
+cnxk_sso_tstamp_cfg(uint16_t port_id, const struct rte_eth_dev *eth_dev, struct cnxk_sso_evdev *dev)
 {
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+
        if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || cnxk_eth_dev->ptp_en)
                dev->tstamp[port_id] = &cnxk_eth_dev->tstamp;
 }
@@ -263,7 +265,7 @@ cnxk_sso_rx_adapter_queue_add(
 
                /* Propagate force bp devarg */
                cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
-               cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
+               cnxk_sso_tstamp_cfg(eth_dev->data->port_id, eth_dev, dev);
                cnxk_eth_dev->nb_rxq_sso++;
        }
 
@@ -290,7 +292,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                        cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i);
        } else {
-               rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id);
+               rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id);
                cnxk_eth_dev->nb_rxq_sso--;
 
                /* Enable drop_re if it was disabled earlier */
-- 
2.25.1
