From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Add event timer adapter support for the CN20K platform.
Implement the new HWWQE insertion feature supported by CN20K.
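
For context, a rough application-side sketch (illustrative only, not part of
this patch) of arming a timer through the generic adapter API from
<rte_event_timer_adapter.h>, which on CN20K now maps onto the HWWQE fast
path. The configured adapter 'adptr', the user context 'app_ctx' and the
error handler handle_arm_failure() are assumed/hypothetical here:

    /* Assumes 'adptr' is an already configured rte_event_timer_adapter. */
    struct rte_event_timer tim = {
            .ev.op = RTE_EVENT_OP_NEW,
            .ev.queue_id = 0,
            .ev.event_type = RTE_EVENT_TYPE_TIMER,
            .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
            .ev.u64 = (uint64_t)&app_ctx,   /* returned in ev.u64 on expiry */
            .state = RTE_EVENT_TIMER_NOT_ARMED,
            .timeout_ticks = 10,            /* expiry in adapter tick units */
    };
    struct rte_event_timer *tims[] = { &tim };

    if (rte_event_timer_arm_burst(adptr, tims, 1) != 1)
            handle_arm_failure(rte_errno);  /* hypothetical error handler */

On expiry the timer is delivered as an RTE_EVENT_TYPE_TIMER event and the
driver resets the timer state to RTE_EVENT_TIMER_NOT_ARMED.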

Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 drivers/common/cnxk/roc_tim.c        |   6 +-
 drivers/event/cnxk/cn20k_eventdev.c  |  16 ++-
 drivers/event/cnxk/cn20k_worker.h    |   6 +
 drivers/event/cnxk/cnxk_tim_evdev.c  |  37 ++++-
 drivers/event/cnxk/cnxk_tim_evdev.h  |  14 ++
 drivers/event/cnxk/cnxk_tim_worker.c |  82 +++++++++--
 drivers/event/cnxk/cnxk_tim_worker.h | 201 +++++++++++++++++++++++++++
 7 files changed, 350 insertions(+), 12 deletions(-)

diff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c
index e326ea0122..a1461fedb1 100644
--- a/drivers/common/cnxk/roc_tim.c
+++ b/drivers/common/cnxk/roc_tim.c
@@ -409,7 +409,7 @@ tim_hw_info_get(struct roc_tim *roc_tim)
        mbox_alloc_msg_tim_get_hw_info(mbox);
        rc = mbox_process_msg(mbox, (void **)&rsp);
        if (rc && rc != MBOX_MSG_INVALID) {
-               plt_err("Failed to get SSO HW info");
+               plt_err("Failed to get TIM HW info");
                rc = -EIO;
                goto exit;
        }
@@ -443,6 +443,10 @@ roc_tim_init(struct roc_tim *roc_tim)
        nb_lfs = roc_tim->nb_lfs;
 
        rc = tim_hw_info_get(roc_tim);
+       if (rc) {
+               plt_tim_dbg("Failed to get TIM HW info");
+               return 0;
+       }
 
        rc = tim_free_lf_count_get(dev, &nb_free_lfs);
        if (rc) {
diff --git a/drivers/event/cnxk/cn20k_eventdev.c b/drivers/event/cnxk/cn20k_eventdev.c
index 57e15b6d8c..d68700fc05 100644
--- a/drivers/event/cnxk/cn20k_eventdev.c
+++ b/drivers/event/cnxk/cn20k_eventdev.c
@@ -957,6 +957,13 @@ cn20k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
        return cn20k_sso_updt_tx_adptr_data(event_dev);
 }
 
+static int
+cn20k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, uint32_t *caps,
+                  const struct event_timer_adapter_ops **ops)
+{
+       return cnxk_tim_caps_get(evdev, flags, caps, ops, cn20k_sso_set_priv_mem);
+}
+
 static struct eventdev_ops cn20k_sso_dev_ops = {
        .dev_infos_get = cn20k_sso_info_get,
        .dev_configure = cn20k_sso_dev_configure,
@@ -991,6 +998,8 @@ static struct eventdev_ops cn20k_sso_dev_ops = {
        .eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
        .eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
 
+       .timer_adapter_caps_get = cn20k_tim_caps_get,
+
        .xstats_get = cnxk_sso_xstats_get,
        .xstats_reset = cnxk_sso_xstats_reset,
        .xstats_get_names = cnxk_sso_xstats_get_names,
@@ -1068,4 +1077,9 @@ RTE_PMD_REGISTER_PARAM_STRING(event_cn20k,
                              CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_STASH "=<string>"
-                             CNXK_SSO_FORCE_BP "=1");
+                             CNXK_SSO_FORCE_BP "=1"
+                             CNXK_TIM_DISABLE_NPA "=1"
+                             CNXK_TIM_CHNK_SLOTS "=<int>"
+                             CNXK_TIM_RINGS_LMT "=<int>"
+                             CNXK_TIM_STATS_ENA "=1"
+                             CNXK_TIM_EXT_CLK "=<string>");
diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h
index 5799e5cc49..b014e549b9 100644
--- a/drivers/event/cnxk/cn20k_worker.h
+++ b/drivers/event/cnxk/cn20k_worker.h
@@ -5,6 +5,7 @@
 #ifndef __CN20K_WORKER_H__
 #define __CN20K_WORKER_H__
 
+#include <rte_event_timer_adapter.h>
 #include <rte_eventdev.h>
 
 #include "cn20k_eventdev.h"
@@ -128,6 +129,11 @@ cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32
                /* Mark vector mempool object as get */
                RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]), (void **)&u64[1], 1,
                                          1);
+       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_TIMER) {
+               struct rte_event_timer *tev = (struct rte_event_timer *)u64[1];
+
+               tev->state = RTE_EVENT_TIMER_NOT_ARMED;
+               u64[1] = tev->ev.u64;
        }
 }
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 27a4dfb490..994d1d1090 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -78,9 +78,25 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
        return rc;
 }
 
+static int
+cnxk_tim_enable_hwwqe(struct cnxk_tim_evdev *dev, struct cnxk_tim_ring *tim_ring)
+{
+       struct roc_tim_hwwqe_cfg hwwqe_cfg;
+
+       memset(&hwwqe_cfg, 0, sizeof(hwwqe_cfg));
+       hwwqe_cfg.hwwqe_ena = 1;
+       hwwqe_cfg.grp_ena = 0;
+       hwwqe_cfg.flw_ctrl_ena = 0;
+       hwwqe_cfg.result_offset = CNXK_TIM_HWWQE_RES_OFFSET_B;
+
+       tim_ring->lmt_base = dev->tim.roc_sso->lmt_base;
+       return roc_tim_lf_config_hwwqe(&dev->tim, tim_ring->ring_id, &hwwqe_cfg);
+}
+
 static void
 cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
 {
+       struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
        uint8_t prod_flag = !tim_ring->prod_type_sp;
 
        /* [STATS] [DFB/FB] [SP][MP]*/
@@ -98,6 +114,16 @@ cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
 #undef FP
        };
 
+       if (dev == NULL)
+               return;
+
+       if (dev->tim.feat.hwwqe) {
+               cnxk_tim_ops.arm_burst = cnxk_tim_arm_burst_hwwqe;
+               cnxk_tim_ops.arm_tmo_tick_burst = cnxk_tim_arm_tmo_burst_hwwqe;
+               cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst_hwwqe;
+               return;
+       }
+
        cnxk_tim_ops.arm_burst =
                arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag];
        cnxk_tim_ops.arm_tmo_tick_burst =
@@ -224,12 +250,13 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
                }
        }
 
-       if (tim_ring->disable_npa) {
+       if (!dev->tim.feat.hwwqe && tim_ring->disable_npa) {
                tim_ring->nb_chunks =
                        tim_ring->nb_timers /
                        CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
                tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
        } else {
+               tim_ring->disable_npa = 0;
                tim_ring->nb_chunks = tim_ring->nb_timers;
        }
 
@@ -255,6 +282,14 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
                goto tim_chnk_free;
        }
 
+       if (dev->tim.feat.hwwqe) {
+               rc = cnxk_tim_enable_hwwqe(dev, tim_ring);
+               if (rc < 0) {
+                       plt_err("Failed to enable hwwqe");
+                       goto tim_chnk_free;
+               }
+       }
+
        plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
        plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index c5b3d67eb8..114a89ee5a 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -15,6 +15,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_reciprocal.h>
+#include <rte_vect.h>
 
 #define NSECPERSEC              1E9
 #define USECPERSEC              1E6
@@ -29,6 +30,8 @@
 #define CNXK_TIM_MIN_CHUNK_SLOTS    (0x1)
 #define CNXK_TIM_MAX_CHUNK_SLOTS    (0x1FFE)
 #define CNXK_TIM_MAX_POOL_CACHE_SZ  (16)
+#define CNXK_TIM_HWWQE_RES_OFFSET_B (24)
+#define CNXK_TIM_ENT_PER_LMT       (7)
 
 #define CN9K_TIM_MIN_TMO_TKS (256)
 
@@ -124,6 +127,7 @@ struct __rte_cache_aligned cnxk_tim_ring {
        uintptr_t tbase;
        uint64_t (*tick_fn)(uint64_t tbase);
        uint64_t ring_start_cyc;
+       uint64_t lmt_base;
        struct cnxk_tim_bkt *bkt;
        struct rte_mempool *chunk_pool;
        struct rte_reciprocal_u64 fast_div;
@@ -310,11 +314,21 @@ TIM_ARM_FASTPATH_MODES
 TIM_ARM_TMO_FASTPATH_MODES
 #undef FP
 
+uint16_t cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+                                 struct rte_event_timer **tim, const uint16_t nb_timers);
+
+uint16_t cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+                                     struct rte_event_timer **tim, const uint64_t timeout_tick,
+                                     const uint16_t nb_timers);
+
 uint16_t
 cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
                            struct rte_event_timer **tim,
                            const uint16_t nb_timers);
 
+uint16_t cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+                                          struct rte_event_timer **tim, const uint16_t nb_timers);
+
 int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
                                 const struct rte_event_timer *evtim, uint64_t *ticks_remaining);
 
diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c
index 5e96f6f188..42d376d375 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.c
+++ b/drivers/event/cnxk/cnxk_tim_worker.c
@@ -32,15 +32,6 @@ cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring,
        return -EINVAL;
 }
 
-static inline void
-cnxk_tim_format_event(const struct rte_event_timer *const tim,
-                     struct cnxk_tim_ent *const entry)
-{
-       entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
-                   (tim->ev.event & 0xFFFFFFFFF);
-       entry->wqe = tim->ev.u64;
-}
-
 static __rte_always_inline uint16_t
 cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
                         struct rte_event_timer **tim, const uint16_t nb_timers,
@@ -77,6 +68,24 @@ cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
        return index;
 }
 
+uint16_t
+cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim,
+                        const uint16_t nb_timers)
+{
+       struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+       uint16_t index;
+
+       for (index = 0; index < nb_timers; index++) {
+               if (cnxk_tim_arm_checks(tim_ring, tim[index]))
+                       break;
+
+               if (cnxk_tim_add_entry_hwwqe(tim_ring, tim[index]))
+                       break;
+       }
+
+       return index;
+}
+
 #define FP(_name, _f3, _f2, _f1, _flags)                                       \
        uint16_t __rte_noinline cnxk_tim_arm_burst_##_name(                    \
                const struct rte_event_timer_adapter *adptr,                   \
@@ -132,6 +141,29 @@ cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
        return set_timers;
 }
 
+uint16_t
+cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+                            struct rte_event_timer **tim, const uint64_t timeout_tick,
+                            const uint16_t nb_timers)
+{
+       struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+       uint16_t idx;
+
+       if (unlikely(!timeout_tick || timeout_tick > tim_ring->nb_bkts)) {
+               const enum rte_event_timer_state state = timeout_tick ?
+                                                                RTE_EVENT_TIMER_ERROR_TOOLATE :
+                                                                RTE_EVENT_TIMER_ERROR_TOOEARLY;
+               for (idx = 0; idx < nb_timers; idx++)
+                       tim[idx]->state = state;
+
+               rte_errno = EINVAL;
+               return 0;
+       }
+
+       return cnxk_tim_add_entry_tmo_hwwqe(tim_ring, tim, timeout_tick * tim_ring->tck_int,
+                                           nb_timers);
+}
+
 #define FP(_name, _f2, _f1, _flags)                                            \
        uint16_t __rte_noinline cnxk_tim_arm_tmo_tick_burst_##_name(           \
                const struct rte_event_timer_adapter *adptr,                   \
@@ -174,6 +206,38 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
        return index;
 }
 
+uint16_t
+cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+                                 struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+       uint64_t __rte_atomic *status;
+       uint16_t i;
+
+       RTE_SET_USED(adptr);
+       for (i = 0; i < nb_timers; i++) {
+               if (tim[i]->state == RTE_EVENT_TIMER_CANCELED) {
+                       rte_errno = EALREADY;
+                       break;
+               }
+
+               if (tim[i]->state != RTE_EVENT_TIMER_ARMED) {
+                       rte_errno = EINVAL;
+                       break;
+               }
+
+               status = (uint64_t __rte_atomic *)&tim[i]->impl_opaque[1];
+               if (!rte_atomic_compare_exchange_strong_explicit(status, (uint64_t *)&tim[i], 0,
+                                                                rte_memory_order_release,
+                                                                rte_memory_order_relaxed)) {
+                       rte_errno = ENOENT;
+                       break;
+               }
+               tim[i]->state = RTE_EVENT_TIMER_CANCELED;
+       }
+
+       return i;
+}
+
 int
 cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
                             const struct rte_event_timer *evtim, uint64_t *ticks_remaining)
diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index e52eadbc08..be6744db51 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -132,6 +132,13 @@ cnxk_tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
        return (n - (d * rte_reciprocal_divide_u64(n, &R)));
 }
 
+static inline void
+cnxk_tim_format_event(const struct rte_event_timer *const tim, struct cnxk_tim_ent *const entry)
+{
+       entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF);
+       entry->wqe = tim->ev.u64;
+}
+
 static __rte_always_inline void
 cnxk_tim_get_target_bucket(struct cnxk_tim_ring *const tim_ring,
                           const uint32_t rel_bkt, struct cnxk_tim_bkt **bkt,
@@ -573,6 +580,200 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring,
        return nb_timers;
 }
 
+static int
+cnxk_tim_add_entry_hwwqe(struct cnxk_tim_ring *const tim_ring, struct rte_event_timer *const tim)
+{
+       uint64_t __rte_atomic *status;
+       uint64_t wdata, pa;
+       uintptr_t lmt_addr;
+       uint16_t lmt_id;
+       uint64_t *lmt;
+       uint64_t rsp;
+       int rc = 0;
+
+       status = (uint64_t __rte_atomic *)&tim->impl_opaque[0];
+       status[0] = 0;
+       status[1] = 0;
+
+       lmt_addr = tim_ring->lmt_base;
+       ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+       lmt = (uint64_t *)lmt_addr;
+
+       lmt[0] = tim->timeout_ticks * tim_ring->tck_int;
+       lmt[1] = 0x1;
+       lmt[2] = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF);
+       lmt[3] = (uint64_t)tim;
+
+       /* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. */
+       wdata = lmt_id;
+       /* SIZEM1 is 0 */
+       pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+       pa |= (1UL << 4);
+       roc_lmt_submit_steorl(wdata, pa);
+
+       do {
+               rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed);
+               rsp &= 0xF0UL;
+       } while (!rsp);
+
+       rsp >>= 4;
+       switch (rsp) {
+       case 0x3:
+               tim->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+               rc = !rc;
+               break;
+       case 0x4:
+               tim->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+               rc = !rc;
+               break;
+       case 0x1:
+               tim->state = RTE_EVENT_TIMER_ARMED;
+               break;
+       default:
+               tim->state = RTE_EVENT_TIMER_ERROR;
+               rc = !rc;
+               break;
+       }
+
+       return rc;
+}
+
+static int
+cnxk_tim_add_entry_tmo_hwwqe(struct cnxk_tim_ring *const tim_ring,
+                            struct rte_event_timer **const tim, uint64_t intvl, uint16_t nb_timers)
+{
+       uint64_t __rte_atomic *status;
+       uint16_t cnt, i, j, done;
+       uint64_t wdata, pa;
+       uintptr_t lmt_addr;
+       uint16_t lmt_id;
+       uint64_t *lmt;
+       uint64_t rsp;
+
+       /* We have 32 LMTLINES per core, but use only 1 line as we need to check status */
+       lmt_addr = tim_ring->lmt_base;
+       ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+
+       done = 0;
+       lmt = (uint64_t *)lmt_addr;
+       /* We can do up to 7 timers per LMTLINE */
+       cnt = nb_timers / CNXK_TIM_ENT_PER_LMT;
+
+       lmt[0] = intvl;
+       lmt[1] = 0x1; /* Always relative */
+       /* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. */
+       wdata = lmt_id;
+       /* SIZEM1 is 0 */
+       pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+       pa |= (uint64_t)(CNXK_TIM_ENT_PER_LMT << 4);
+       for (i = 0; i < cnt; i++) {
+               status = (uint64_t __rte_atomic *)&tim[i * CNXK_TIM_ENT_PER_LMT]->impl_opaque[0];
+
+               for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) {
+                       cnxk_tim_format_event(tim[(i * CNXK_TIM_ENT_PER_LMT) + j],
+                                             (struct cnxk_tim_ent *)&lmt[(j << 1) + 2]);
+                       tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[0] = 0;
+                       tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[1] = 0;
+                       tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = RTE_EVENT_TIMER_ARMED;
+               }
+
+               roc_lmt_submit_steorl(wdata, pa);
+               do {
+                       rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed);
+                       rsp &= 0xFUL;
+               } while (!rsp);
+
+               done += CNXK_TIM_ENT_PER_LMT;
+               rsp &= 0xF;
+               if (rsp != 0x1) {
+                       switch (rsp) {
+                       case 0x3:
+                               for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++)
+                                       tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+                                               RTE_EVENT_TIMER_ERROR_TOOEARLY;
+                               done -= CNXK_TIM_ENT_PER_LMT;
+                               break;
+                       case 0x4:
+                               for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++)
+                                       tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+                                               RTE_EVENT_TIMER_ERROR_TOOLATE;
+                               done -= CNXK_TIM_ENT_PER_LMT;
+                               break;
+                       case 0x2:
+                       default:
+                               for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) {
+                                       if ((rte_atomic_load_explicit(
+                                                    (uint64_t __rte_atomic
+                                                             *)&tim[(i * CNXK_TIM_ENT_PER_LMT) + j]
+                                                            ->impl_opaque[0],
+                                                    rte_memory_order_relaxed) &
+                                            0xF0) != 0x10) {
+                                               tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+                                                       RTE_EVENT_TIMER_ERROR;
+                                               done--;
+                                       }
+                               }
+                               break;
+                       }
+                       goto done;
+               }
+       }
+
+       /* SIZEM1 is 0 */
+       pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+       pa |= (uint64_t)((nb_timers - cnt) << 4);
+       if (nb_timers - cnt) {
+               status = (uint64_t __rte_atomic *)&tim[cnt]->impl_opaque[0];
+
+               for (i = 0; i < nb_timers - cnt; i++) {
+                       cnxk_tim_format_event(tim[cnt + i],
+                                             (struct cnxk_tim_ent *)&lmt[(i << 1) + 2]);
+                       tim[cnt + i]->impl_opaque[0] = 0;
+                       tim[cnt + i]->impl_opaque[1] = 0;
+                       tim[cnt + i]->state = RTE_EVENT_TIMER_ARMED;
+               }
+
+               roc_lmt_submit_steorl(wdata, pa);
+               do {
+                       rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed);
+                       rsp &= 0xFUL;
+               } while (!rsp);
+
+               done += (nb_timers - cnt);
+               rsp &= 0xF;
+               if (rsp != 0x1) {
+                       switch (rsp) {
+                       case 0x3:
+                               for (j = 0; j < nb_timers - cnt; j++)
+                                       tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+                               done -= (nb_timers - cnt);
+                               break;
+                       case 0x4:
+                               for (j = 0; j < nb_timers - cnt; j++)
+                                       tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+                               done -= (nb_timers - cnt);
+                               break;
+                       case 0x2:
+                       default:
+                               for (j = 0; j < nb_timers - cnt; j++) {
+                                       if ((rte_atomic_load_explicit(
+                                                    (uint64_t __rte_atomic *)&tim[cnt + j]
+                                                            ->impl_opaque[0],
+                                                    rte_memory_order_relaxed) &
+                                            0xF0) != 0x10) {
+                                               tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR;
+                                               done--;
+                                       }
+                               }
+                               break;
+                       }
+               }
+       }
+
+done:
+       return done;
+}
+
 static int
 cnxk_tim_rm_entry(struct rte_event_timer *tim)
 {
-- 
2.25.1
