From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Add the CN20K start function, along with a few cleanup APIs shared
with CN10K to keep the drivers consistent.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c | 103 +--------------------------
 drivers/event/cnxk/cn20k_eventdev.c |  76 ++++++++++++++++++++
 drivers/event/cnxk/cnxk_common.h    | 104 ++++++++++++++++++++++++++++
 3 files changed, 183 insertions(+), 100 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c 
b/drivers/event/cnxk/cn10k_eventdev.c
index 82d973a420..087560f43f 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -153,83 +153,6 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, 
uintptr_t base,
        return 0;
 }
 
-static void
-cn10k_sso_hws_reset(void *arg, void *hws)
-{
-       struct cnxk_sso_evdev *dev = arg;
-       struct cn10k_sso_hws *ws = hws;
-       uintptr_t base = ws->base;
-       uint64_t pend_state;
-       union {
-               __uint128_t wdata;
-               uint64_t u64[2];
-       } gw;
-       uint8_t pend_tt;
-       bool is_pend;
-
-       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
-       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
-       /* Wait till getwork/swtp/waitw/desched completes. */
-       is_pend = false;
-       /* Work in WQE0 is always consumed, unless its a SWTAG. */
-       pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
-       if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
-           ws->swtag_req)
-               is_pend = true;
-
-       do {
-               pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
-       } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
-                              BIT_ULL(56) | BIT_ULL(54)));
-       pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
-       if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
-               if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
-                       cnxk_sso_hws_swtag_untag(base +
-                                                SSOW_LF_GWS_OP_SWTAG_UNTAG);
-               plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
-       } else if (pend_tt != SSO_TT_EMPTY) {
-               plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
-       }
-
-       /* Wait for desched to complete. */
-       do {
-               pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
-       } while (pend_state & (BIT_ULL(58) | BIT_ULL(56)));
-
-       switch (dev->gw_mode) {
-       case CNXK_GW_MODE_PREF:
-       case CNXK_GW_MODE_PREF_WFE:
-               while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
-                       ;
-               break;
-       case CNXK_GW_MODE_NONE:
-       default:
-               break;
-       }
-
-       if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
-           SSO_TT_EMPTY) {
-               plt_write64(BIT_ULL(16) | 1,
-                           ws->base + SSOW_LF_GWS_OP_GET_WORK0);
-               do {
-                       roc_load_pair(gw.u64[0], gw.u64[1],
-                                     ws->base + SSOW_LF_GWS_WQE0);
-               } while (gw.u64[0] & BIT_ULL(63));
-               pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
-               if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
-                       if (pend_tt == SSO_TT_ATOMIC ||
-                           pend_tt == SSO_TT_ORDERED)
-                               cnxk_sso_hws_swtag_untag(
-                                       base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
-                       plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
-               }
-       }
-
-       plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
-       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
-       rte_mb();
-}
-
 static void
 cn10k_sso_set_rsrc(void *arg)
 {
@@ -707,24 +630,6 @@ cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void 
*port, uint8_t queues
        return cn10k_sso_port_unlink_profile(event_dev, port, queues, 
nb_unlinks, 0);
 }
 
-static void
-cn10k_sso_configure_queue_stash(struct rte_eventdev *event_dev)
-{
-       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-       struct roc_sso_hwgrp_stash stash[dev->stash_cnt];
-       int i, rc;
-
-       plt_sso_dbg();
-       for (i = 0; i < dev->stash_cnt; i++) {
-               stash[i].hwgrp = dev->stash_parse_data[i].queue;
-               stash[i].stash_offset = dev->stash_parse_data[i].stash_offset;
-               stash[i].stash_count = dev->stash_parse_data[i].stash_length;
-       }
-       rc = roc_sso_hwgrp_stash_config(&dev->sso, stash, dev->stash_cnt);
-       if (rc < 0)
-               plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
-}
-
 static int
 cn10k_sso_start(struct rte_eventdev *event_dev)
 {
@@ -736,9 +641,8 @@ cn10k_sso_start(struct rte_eventdev *event_dev)
        if (rc < 0)
                return rc;
 
-       cn10k_sso_configure_queue_stash(event_dev);
-       rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
-                           cn10k_sso_hws_flush_events);
+       cnxk_sso_configure_queue_stash(event_dev);
+       rc = cnxk_sso_start(event_dev, cnxk_sso_hws_reset, 
cn10k_sso_hws_flush_events);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set(event_dev);
@@ -759,8 +663,7 @@ cn10k_sso_stop(struct rte_eventdev *event_dev)
        for (i = 0; i < event_dev->data->nb_ports; i++)
                hws[i] = i;
        roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports);
-       cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
-                     cn10k_sso_hws_flush_events);
+       cnxk_sso_stop(event_dev, cnxk_sso_hws_reset, 
cn10k_sso_hws_flush_events);
 }
 
 static int
diff --git a/drivers/event/cnxk/cn20k_eventdev.c 
b/drivers/event/cnxk/cn20k_eventdev.c
index 34636c77ce..90902bce40 100644
--- a/drivers/event/cnxk/cn20k_eventdev.c
+++ b/drivers/event/cnxk/cn20k_eventdev.c
@@ -86,6 +86,61 @@ cn20k_sso_hws_release(void *arg, void *hws)
        memset(ws, 0, sizeof(*ws));
 }
 
+static int
+cn20k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, 
cnxk_handle_event_t fn,
+                          void *arg)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
+       uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
+       struct cn20k_sso_hws *ws = hws;
+       uint64_t cq_ds_cnt = 1;
+       uint64_t aq_cnt = 1;
+       uint64_t ds_cnt = 1;
+       struct rte_event ev;
+       uint64_t val, req;
+
+       plt_write64(0, base + SSO_LF_GGRP_QCTL);
+
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
+       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+       req = queue_id;     /* GGRP ID */
+       req |= BIT_ULL(18); /* Grouped */
+       req |= BIT_ULL(16); /* WAIT */
+
+       aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+       ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+       cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+       cq_ds_cnt &= 0x3FFF3FFF0000;
+
+       while (aq_cnt || cq_ds_cnt || ds_cnt) {
+               plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+               cn20k_sso_hws_get_work_empty(ws, &ev, 0);
+               if (fn != NULL && ev.u64 != 0)
+                       fn(arg, ev);
+               if (ev.sched_type != SSO_TT_EMPTY)
+                       cnxk_sso_hws_swtag_flush(ws->base);
+               else if (retry-- == 0)
+                       break;
+               do {
+                       val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+               } while (val & BIT_ULL(56));
+               aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+               ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+               cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+               /* Extract cq and ds count */
+               cq_ds_cnt &= 0x3FFF3FFF0000;
+       }
+
+       if (aq_cnt || cq_ds_cnt || ds_cnt)
+               return -EAGAIN;
+
+       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
+       rte_mb();
+
+       return 0;
+}
+
 static void
 cn20k_sso_set_rsrc(void *arg)
 {
@@ -314,6 +369,25 @@ cn20k_sso_port_unlink(struct rte_eventdev *event_dev, void 
*port, uint8_t queues
        return cn20k_sso_port_unlink_profile(event_dev, port, queues, 
nb_unlinks, 0);
 }
 
+static int
+cn20k_sso_start(struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV];
+       int rc, i;
+
+       cnxk_sso_configure_queue_stash(event_dev);
+       rc = cnxk_sso_start(event_dev, cnxk_sso_hws_reset, 
cn20k_sso_hws_flush_events);
+       if (rc < 0)
+               return rc;
+       cn20k_sso_fp_fns_set(event_dev);
+       for (i = 0; i < event_dev->data->nb_ports; i++)
+               hws[i] = i;
+       roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports);
+
+       return rc;
+}
+
 static struct eventdev_ops cn20k_sso_dev_ops = {
        .dev_infos_get = cn20k_sso_info_get,
        .dev_configure = cn20k_sso_dev_configure,
@@ -332,6 +406,8 @@ static struct eventdev_ops cn20k_sso_dev_ops = {
        .port_link_profile = cn20k_sso_port_link_profile,
        .port_unlink_profile = cn20k_sso_port_unlink_profile,
        .timeout_ticks = cnxk_sso_timeout_ticks,
+
+       .dev_start = cn20k_sso_start,
 };
 
 static int
diff --git a/drivers/event/cnxk/cnxk_common.h b/drivers/event/cnxk/cnxk_common.h
index 712d82bee7..c361d0530d 100644
--- a/drivers/event/cnxk/cnxk_common.h
+++ b/drivers/event/cnxk/cnxk_common.h
@@ -8,6 +8,15 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+struct cnxk_sso_hws_prf {
+       uint64_t base;
+       uint32_t gw_wdata;
+       void *lookup_mem;
+       uint64_t gw_rdata;
+       uint8_t swtag_req;
+       uint8_t hws_id;
+};
+
 static uint32_t
 cnxk_sso_hws_prf_wdata(struct cnxk_sso_evdev *dev)
 {
@@ -52,4 +61,99 @@ cnxk_sso_hws_preschedule_get(uint8_t preschedule_type)
        return gw_mode;
 }
 
+static void
+cnxk_sso_hws_reset(void *arg, void *ws)
+{
+       struct cnxk_sso_evdev *dev = arg;
+       struct cnxk_sso_hws_prf *ws_prf;
+       uint64_t pend_state;
+       uint8_t swtag_req;
+       uintptr_t base;
+       uint8_t hws_id;
+       union {
+               __uint128_t wdata;
+               uint64_t u64[2];
+       } gw;
+       uint8_t pend_tt;
+       bool is_pend;
+
+       ws_prf = ws;
+       base = ws_prf->base;
+       hws_id = ws_prf->hws_id;
+       swtag_req = ws_prf->swtag_req;
+
+       roc_sso_hws_gwc_invalidate(&dev->sso, &hws_id, 1);
+       plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
+       /* Wait till getwork/swtp/waitw/desched completes. */
+       is_pend = false;
+       /* Work in WQE0 is always consumed, unless it's a SWTAG. */
+       pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+       if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) || swtag_req)
+               is_pend = true;
+
+       do {
+               pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+       } while (pend_state &
+                (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | 
BIT_ULL(54)));
+       pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
+       if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+               if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
+                       cnxk_sso_hws_swtag_untag(base + 
SSOW_LF_GWS_OP_SWTAG_UNTAG);
+               plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+       } else if (pend_tt != SSO_TT_EMPTY) {
+               plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+       }
+
+       /* Wait for desched to complete. */
+       do {
+               pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+       } while (pend_state & (BIT_ULL(58) | BIT_ULL(56)));
+
+       switch (dev->gw_mode) {
+       case CNXK_GW_MODE_PREF:
+       case CNXK_GW_MODE_PREF_WFE:
+               while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
+                       ;
+               break;
+       case CNXK_GW_MODE_NONE:
+       default:
+               break;
+       }
+
+       if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) != 
SSO_TT_EMPTY) {
+               plt_write64(BIT_ULL(16) | 1, base + SSOW_LF_GWS_OP_GET_WORK0);
+               do {
+                       roc_load_pair(gw.u64[0], gw.u64[1], base + 
SSOW_LF_GWS_WQE0);
+               } while (gw.u64[0] & BIT_ULL(63));
+               pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
+               if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+                       if (pend_tt == SSO_TT_ATOMIC || pend_tt == 
SSO_TT_ORDERED)
+                               cnxk_sso_hws_swtag_untag(base + 
SSOW_LF_GWS_OP_SWTAG_UNTAG);
+                       plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+               }
+       }
+
+       plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
+       roc_sso_hws_gwc_invalidate(&dev->sso, &hws_id, 1);
+       rte_mb();
+}
+
+static void
+cnxk_sso_configure_queue_stash(struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       struct roc_sso_hwgrp_stash stash[dev->stash_cnt];
+       int i, rc;
+
+       plt_sso_dbg();
+       for (i = 0; i < dev->stash_cnt; i++) {
+               stash[i].hwgrp = dev->stash_parse_data[i].queue;
+               stash[i].stash_offset = dev->stash_parse_data[i].stash_offset;
+               stash[i].stash_count = dev->stash_parse_data[i].stash_length;
+       }
+       rc = roc_sso_hwgrp_stash_config(&dev->sso, stash, dev->stash_cnt);
+       if (rc < 0)
+               plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
+}
+
 #endif /* __CNXK_COMMON_H__ */
-- 
2.25.1

Reply via email to