From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Invalidate the GWC on event port reset (i.e., HWS reset) to prevent
invalid responses from the SSO.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 drivers/common/cnxk/roc_sso.c       | 31 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_sso.h       |  2 ++
 drivers/common/cnxk/version.map     |  1 +
 drivers/event/cnxk/cn10k_eventdev.c | 19 +++++++++++++++++-
 4 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index a5f48d5bbc..1ea0761531 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -357,6 +357,37 @@ roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
        return rc;
 }
 
+void
+roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws,
+                          uint8_t nb_hws)
+{
+       struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+       struct ssow_lf_inv_req *req;
+       struct dev *dev = &sso->dev;
+       struct mbox *mbox;
+       int i;
+
+       if (!nb_hws)
+               return;
+
+       mbox = mbox_get(dev->mbox);
+       req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
+       if (req == NULL) {
+               mbox_process(mbox);
+               req = mbox_alloc_msg_sso_ws_cache_inv(mbox);
+               if (req == NULL) {
+                       mbox_put(mbox);
+                       return;
+               }
+       }
+       req->hdr.ver = SSOW_INVAL_SELECTIVE_VER;
+       req->nb_hws = nb_hws;
+       for (i = 0; i < nb_hws; i++)
+               req->hws[i] = hws[i];
+       mbox_process(mbox);
+       mbox_put(mbox);
+}
+
 int
 roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
                        struct roc_sso_hwgrp_stats *stats)
diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
index a2bb6fcb22..8ee62afb9a 100644
--- a/drivers/common/cnxk/roc_sso.h
+++ b/drivers/common/cnxk/roc_sso.h
@@ -100,6 +100,8 @@ int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
 int __roc_api roc_sso_hwgrp_stash_config(struct roc_sso *roc_sso,
                                         struct roc_sso_hwgrp_stash *stash,
                                         uint16_t nb_stash);
+void __roc_api roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws,
+                                         uint8_t nb_hws);
 
 /* Debug */
 void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 8c71497df8..cfb7efbdc7 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -475,6 +475,7 @@ INTERNAL {
        roc_sso_hws_base_get;
+       roc_sso_hws_gwc_invalidate;
        roc_sso_hws_link;
        roc_sso_hws_stats_get;
        roc_sso_hws_unlink;
        roc_sso_ns_to_gw;
        roc_sso_rsrc_fini;
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 499a3aace7..56482c20a1 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -118,6 +118,7 @@ static int
 cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                           cnxk_handle_event_t fn, void *arg)
 {
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
        struct cn10k_sso_hws *ws = hws;
        uint64_t cq_ds_cnt = 1;
@@ -128,6 +129,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 
        plt_write64(0, base + SSO_LF_GGRP_QCTL);
 
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
@@ -162,6 +164,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                return -EAGAIN;
 
        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
        rte_mb();
 
        return 0;
@@ -181,6 +184,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
        uint8_t pend_tt;
        bool is_pend;
 
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        /* Wait till getwork/swtp/waitw/desched completes. */
        is_pend = false;
@@ -237,6 +241,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
        }
 
        plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
+       roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1);
        rte_mb();
 }
 
@@ -670,7 +675,9 @@ cn10k_sso_configure_queue_stash(struct rte_eventdev *event_dev)
 static int
 cn10k_sso_start(struct rte_eventdev *event_dev)
 {
-       int rc;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV];
+       int rc, i;
 
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
@@ -682,6 +689,9 @@ cn10k_sso_start(struct rte_eventdev *event_dev)
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set(event_dev);
+       for (i = 0; i < event_dev->data->nb_ports; i++)
+               hws[i] = i;
+       roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports);
 
        return rc;
 }
@@ -689,6 +699,13 @@ cn10k_sso_start(struct rte_eventdev *event_dev)
 static void
 cn10k_sso_stop(struct rte_eventdev *event_dev)
 {
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV];
+       int i;
+
+       for (i = 0; i < event_dev->data->nb_ports; i++)
+               hws[i] = i;
+       roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports);
        cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
                      cn10k_sso_hws_flush_events);
 }
-- 
2.41.0

Reply via email to