On Sun, May 15, 2022 at 4:00 PM Shijith Thotton <sthot...@marvell.com> wrote:
>
> If an event queue flush does not complete after a fixed number of tries,
> remaining queues are flushed before retrying the one with incomplete
> flush.
>
> Signed-off-by: Shijith Thotton <sthot...@marvell.com>
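
A quick illustration for anyone skimming the archive: below is a minimal,
standalone sketch of the multi-pass flow described above. flush_queue(),
NB_QUEUES and the "queue 2 drains late" behaviour are hypothetical stand-ins
for the driver's flush_fn callback and queue count, not the PMD code itself;
the real implementation is in cnxk_sso_cleanup() further down.

/* Minimal sketch, not driver code: flush_queue() is a placeholder for the
 * per-queue flush callback and pretends queue 2 needs two extra passes
 * before it drains. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NB_QUEUES 8

static int
flush_queue(uint8_t queue_id)
{
	static int incomplete_passes = 2;

	if (queue_id == 2 && incomplete_passes-- > 0)
		return -EAGAIN; /* flush did not complete on this pass */
	return 0;
}

int
main(void)
{
	uint8_t pend_list[NB_QUEUES], pend_cnt, new_pcnt, queue_id, i;

	/* Start with every queue pending. */
	pend_cnt = NB_QUEUES;
	for (i = 0; i < NB_QUEUES; i++)
		pend_list[i] = i;

	/* Each pass walks only the still-pending queues; a queue whose
	 * flush is incomplete is requeued and retried after the others. */
	while (pend_cnt) {
		new_pcnt = 0;
		for (i = 0; i < pend_cnt; i++) {
			queue_id = pend_list[i];
			if (flush_queue(queue_id) == -EAGAIN) {
				pend_list[new_pcnt++] = queue_id;
				continue;
			}
			printf("queue %u flushed\n", (unsigned)queue_id);
		}
		pend_cnt = new_pcnt;
	}

	return 0;
}

A queue that cannot be drained within its retry budget reports -EAGAIN, gets
requeued, and is revisited only after the remaining queues have had a chance
to flush.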
Please rebase with dpdk-next-net-eventdev/for-main

+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@@ -140,7 -138,11 +141,15 @@@ cn10k_sso_hws_flush_events(void *hws, u
 			if (fn != NULL && ev.u64 != 0)
 				fn(arg, ev);
 			if (ev.sched_type != SSO_TT_EMPTY)
++<<<<<<< HEAD
+				cnxk_sso_hws_swtag_flush(ws->base);
++=======
+				cnxk_sso_hws_swtag_flush(
+					ws->base + SSOW_LF_GWS_WQE0,
+					ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			else if (retry-- == 0)
+				break;
++>>>>>>> event/cnxk: flush event queues over multiple pass
 			do {
 				val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
 			} while (val & BIT_ULL(56));
diff --cc drivers/event/cnxk/cn9k_eventdev.c
index 987888d3db,db41f41358..0000000000
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@@ -122,7 -122,7 +122,11 @@@ cn9k_sso_hws_flush_events(void *hws, ui
 			  cnxk_handle_event_t fn, void *arg)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
++<<<<<<< HEAD
+	struct cnxk_timesync_info *tstamp;
++=======
+	uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
++>>>>>>> event/cnxk: flush event queues over multiple pass
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
@@@ -163,7 -157,11 +167,15 @@@
 			if (fn != NULL && ev.u64 != 0)
 				fn(arg, ev);
 			if (ev.sched_type != SSO_TT_EMPTY)
++<<<<<<< HEAD
+				cnxk_sso_hws_swtag_flush(ws_base);
++=======
+				cnxk_sso_hws_swtag_flush(
+					ws_base + SSOW_LF_GWS_TAG,
+					ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			else if (retry-- == 0)
+				break;
++>>>>>>> event/cnxk: flush event queues over multiple pass
 			do {
 				val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
 			} while (val & BIT_ULL(56)

> ---
>  drivers/event/cnxk/cn10k_eventdev.c | 10 +++++++++-
>  drivers/event/cnxk/cn9k_eventdev.c  | 10 +++++++++-
>  drivers/event/cnxk/cnxk_eventdev.c  | 31 ++++++++++++++++++++++-------
>  drivers/event/cnxk/cnxk_eventdev.h  |  5 +++--
>  4 files changed, 45 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index 9b4d2895ec..6cdfc14d79 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -108,10 +108,11 @@ cn10k_sso_hws_release(void *arg, void *hws)
>  	memset(ws, 0, sizeof(*ws));
>  }
>
> -static void
> +static int
>  cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  			   cnxk_handle_event_t fn, void *arg)
>  {
> +	uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
>  	struct cn10k_sso_hws *ws = hws;
>  	uint64_t cq_ds_cnt = 1;
>  	uint64_t aq_cnt = 1;
> @@ -140,6 +141,8 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  			cnxk_sso_hws_swtag_flush(
>  				ws->base + SSOW_LF_GWS_WQE0,
>  				ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
> +		else if (retry-- == 0)
> +			break;
>  		do {
>  			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
>  		} while (val & BIT_ULL(56));
> @@ -150,8 +153,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  		cq_ds_cnt &= 0x3FFF3FFF0000;
>  	}
>
> +	if (aq_cnt || cq_ds_cnt || ds_cnt)
> +		return -EAGAIN;
> +
>  	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
>  	rte_mb();
> +
> +	return 0;
>  }
>
>  static void
> diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> index 4bba477dd1..db41f41358 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -117,11 +117,12 @@ cn9k_sso_hws_release(void *arg, void *hws)
>  	}
>  }
>
> -static void
> +static int
>  cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  			  cnxk_handle_event_t fn, void *arg)
>  {
>  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
> +	uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
>  	struct cn9k_sso_hws_dual *dws;
>  	struct cn9k_sso_hws *ws;
>  	uint64_t cq_ds_cnt = 1;
> @@ -159,6 +160,8 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  			cnxk_sso_hws_swtag_flush(
>  				ws_base + SSOW_LF_GWS_TAG,
>  				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
> +		else if (retry-- == 0)
> +			break;
>  		do {
>  			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
>  		} while (val & BIT_ULL(56));
> @@ -169,7 +172,12 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>  		cq_ds_cnt &= 0x3FFF3FFF0000;
>  	}
>
> +	if (aq_cnt || cq_ds_cnt || ds_cnt)
> +		return -EAGAIN;
> +
>  	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
> +
> +	return 0;
>  }
>
>  static void
> diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
> index be021d86c9..91235ed9f3 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.c
> +++ b/drivers/event/cnxk/cnxk_eventdev.c
> @@ -385,9 +385,10 @@ static void
>  cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
>  		 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
>  {
> +	uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
>  	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
>  	uintptr_t hwgrp_base;
> -	uint16_t i;
> +	uint8_t queue_id, i;
>  	void *ws;
>
>  	for (i = 0; i < dev->nb_event_ports; i++) {
> @@ -396,14 +397,30 @@ cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
>  	}
>
>  	rte_mb();
> +
> +	/* Consume all the events through HWS0 */
>  	ws = event_dev->data->ports[0];
>
> -	for (i = 0; i < dev->nb_event_queues; i++) {
> -		/* Consume all the events through HWS0 */
> -		hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
> -		flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
> -		/* Enable/Disable SSO GGRP */
> -		plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
> +	/* Starting list of queues to flush */
> +	pend_cnt = dev->nb_event_queues;
> +	for (i = 0; i < dev->nb_event_queues; i++)
> +		pend_list[i] = i;
> +
> +	while (pend_cnt) {
> +		new_pcnt = 0;
> +		for (i = 0; i < pend_cnt; i++) {
> +			queue_id = pend_list[i];
> +			hwgrp_base =
> +				roc_sso_hwgrp_base_get(&dev->sso, queue_id);
> +			if (flush_fn(ws, queue_id, hwgrp_base,
> +				     cnxk_handle_event, event_dev)) {
> +				pend_list[new_pcnt++] = queue_id;
> +				continue;
> +			}
> +			/* Enable/Disable SSO GGRP */
> +			plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
> +		}
> +		pend_cnt = new_pcnt;
>  	}
>  }
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
> index 5564746e6d..a490829a8a 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.h
> +++ b/drivers/event/cnxk/cnxk_eventdev.h
> @@ -54,6 +54,7 @@
>  #define CN10K_GW_MODE_PREF 1
>  #define CN10K_GW_MODE_PREF_WFE 2
>
> +#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff
>  #define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name)                              \
>  	do {                                                                   \
>  		if (strncmp(dev->driver->name, drv_name, strlen(drv_name)))   \
> @@ -69,8 +70,8 @@ typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map,
>  				 uint16_t nb_link);
>  typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
>  typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
> -typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
> -				     cnxk_handle_event_t fn, void *arg);
> +typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
> +				    cnxk_handle_event_t fn, void *arg);
>
>  struct cnxk_sso_qos {
>  	uint16_t queue;
> --
> 2.25.1
>