On Tue, May 16, 2023 at 8:18 PM <pbhagavat...@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavat...@marvell.com>
>
> Add support to get the remaining ticks to expire for a
> given event timer.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
Please resend this as CI is failing.

On the next version, please also update doc/guides/rel_notes/release_23_11.rst as:

* **Updated Marvell cnxk eventdev driver.**

  * One line describing the new feature.

> ---
>  drivers/event/cnxk/cn10k_worker.h    |  6 +++++
>  drivers/event/cnxk/cn9k_worker.h     |  4 ++++
>  drivers/event/cnxk/cnxk_tim_evdev.c  |  1 +
>  drivers/event/cnxk/cnxk_tim_evdev.h  |  3 +++
>  drivers/event/cnxk/cnxk_tim_worker.c | 35 ++++++++++++++++++++++++++++
>  5 files changed, 49 insertions(+)
>
> diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
> index 06c71c6092..3907919135 100644
> --- a/drivers/event/cnxk/cn10k_worker.h
> +++ b/drivers/event/cnxk/cn10k_worker.h
> @@ -5,7 +5,9 @@
>  #ifndef __CN10K_WORKER_H__
>  #define __CN10K_WORKER_H__
>
> +#include <rte_event_timer_adapter.h>
>  #include <rte_eventdev.h>
> +
>  #include "cn10k_cryptodev_event_dp.h"
>  #include "cn10k_rx.h"
>  #include "cnxk_worker.h"
> @@ -213,6 +215,10 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
>                 /* Mark vector mempool object as get */
>                 RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
>                                           (void **)&u64[1], 1, 1);
> +       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_TIMER) {
> +               struct rte_event_timer *tim = (void *)u64[1];
> +
> +               tim->state = RTE_EVENT_TIMER_NOT_ARMED;
>         }
>  }
>
> diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
> index 1ce4b044e8..04be35de8a 100644
> --- a/drivers/event/cnxk/cn9k_worker.h
> +++ b/drivers/event/cnxk/cn9k_worker.h
> @@ -215,6 +215,10 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
>                 if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
>                         cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
>                 u64[1] = mbuf;
> +       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_TIMER) {
> +               struct rte_event_timer *tim = (void *)u64[1];
> +
> +               tim->state = RTE_EVENT_TIMER_NOT_ARMED;
>         }
>  }
>
> diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
> index 121480df15..6d59fdf909 100644
> --- a/drivers/event/cnxk/cnxk_tim_evdev.c
> +++ b/drivers/event/cnxk/cnxk_tim_evdev.c
> @@ -392,6 +392,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
>         cnxk_tim_ops.start = cnxk_tim_ring_start;
>         cnxk_tim_ops.stop = cnxk_tim_ring_stop;
>         cnxk_tim_ops.get_info = cnxk_tim_ring_info_get;
> +       cnxk_tim_ops.remaining_ticks_get = cnxk_tim_remaining_ticks_get;
>         sso_set_priv_mem_fn = priv_mem_fn;
>
>         if (dev->enable_stats) {
> diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
> index 3a0b036cb4..b91fcb3aca 100644
> --- a/drivers/event/cnxk/cnxk_tim_evdev.h
> +++ b/drivers/event/cnxk/cnxk_tim_evdev.h
> @@ -320,6 +320,9 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
>                             struct rte_event_timer **tim,
>                             const uint16_t nb_timers);
>
> +int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
> +                                const struct rte_event_timer *evtim, uint64_t *ticks_remaining);
> +
>  int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
>                        uint32_t *caps,
>                        const struct event_timer_adapter_ops **ops,
> diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c
> index 923a72093b..d1dab0552f 100644
> --- a/drivers/event/cnxk/cnxk_tim_worker.c
> +++ b/drivers/event/cnxk/cnxk_tim_worker.c
> @@ -171,3 +171,38 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
>
>         return index;
>  }
> +
> +int
> +cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
> +                            const struct rte_event_timer *evtim, uint64_t *ticks_remaining)
> +{
> +       struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;
> +       struct cnxk_tim_bkt *bkt, *current_bkt;
> +       struct cnxk_tim_ent *entry;
> +       uint64_t bkt_cyc, bucket;
> +       uint64_t sema;
> +
> +       if (evtim->impl_opaque[1] == 0 || evtim->impl_opaque[0] == 0)
> +               return -ENOENT;
> +
> +       entry = (struct cnxk_tim_ent *)(uintptr_t)evtim->impl_opaque[0];
> +       if (entry->wqe != evtim->ev.u64)
> +               return -ENOENT;
> +
> +       if (evtim->state != RTE_EVENT_TIMER_ARMED)
> +               return -ENOENT;
> +
> +       bkt = (struct cnxk_tim_bkt *)evtim->impl_opaque[1];
> +       sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE);
> +       if (cnxk_tim_bkt_get_hbt(sema) || !cnxk_tim_bkt_get_nent(sema))
> +               return -ENOENT;
> +
> +       bkt_cyc = tim_ring->tick_fn(tim_ring->tbase) - tim_ring->ring_start_cyc;
> +       bucket = rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div);
> +       current_bkt = &tim_ring->bkt[bucket];
> +
> +       *ticks_remaining = RTE_MAX(bkt, current_bkt) - RTE_MIN(bkt, current_bkt);
> +       /* Assume that the current bucket is yet to expire */
> +       *ticks_remaining += 1;
> +       return 0;
> +}
> --
> 2.25.1
>
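For anyone trying the feature from an application, below is a minimal, untested sketch of how the remaining ticks could be queried after arming a timer. It assumes the generic rte_event_timer_remaining_ticks_get() wrapper from the library side of this series is available; the helper name and the surrounding setup are illustrative only and not part of this patch.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>

    #include <rte_event_timer_adapter.h>

    /* Query how many adapter ticks remain before an armed event timer fires.
     * "adapter" must be a started event timer adapter and "evtim" a timer
     * previously armed with rte_event_timer_arm_burst().
     */
    static void
    dump_remaining_ticks(const struct rte_event_timer_adapter *adapter,
                         const struct rte_event_timer *evtim)
    {
            uint64_t ticks = 0;
            int rc;

            rc = rte_event_timer_remaining_ticks_get(adapter, evtim, &ticks);
            if (rc == 0) {
                    printf("timer expires in %" PRIu64 " adapter tick(s)\n", ticks);
            } else if (rc == -ENOENT) {
                    /* The cnxk driver returns -ENOENT when the timer is not
                     * armed or its bucket has already been processed.
                     */
                    printf("timer already expired or was cancelled\n");
            } else {
                    printf("remaining ticks query failed: %d\n", rc);
            }
    }

Note that the driver treats the bucket currently being processed as not yet expired (the "+ 1" above), so a live timer always reports at least one remaining tick.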