Signed-off-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>
---
 config/common_base                    |   1 +
 drivers/event/octeontx/timvf_worker.h | 105 ++++++++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+)

diff --git a/config/common_base b/config/common_base
index 00010de92..2f8c21871 100644
--- a/config/common_base
+++ b/config/common_base
@@ -563,6 +563,7 @@ CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=y
 #
 CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=y
 CONFIG_RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF=n
+CONFIG_RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL=n
 
 #
 # Compile PMD for OPDL event device
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index c3f37372a..1d5e27631 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -7,6 +7,23 @@
 
 #include "timvf_evdev.h"
 
+#ifdef RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL
+#if defined(RTE_ARCH_ARM64)
+#define timvf_store_pair(val0, val1, addr) ({          \
+                       asm volatile(                   \
+                       "stp %x[x0], %x[x1], [%x[p1]]"  \
+                       ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+                       ); })
+#else
+#define timvf_store_pair(val0, val1, addr)             \
+do {                                                   \
+       rte_write64(val0, addr);                        \
+       rte_write64(val1, (((uint8_t *)addr) + 8));     \
+} while (0)
+#endif
+#endif
+
+
 static inline int16_t
 timr_bkt_fetch_rem(uint64_t w1)
 {
@@ -162,6 +179,49 @@ timr_clr_bkt(struct timvf_ring *timr, struct tim_mem_bucket *bkt)
 }
 #endif
 
+#ifdef RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL
+static __rte_always_inline void __hot
+timvf_sw_trav(struct timvf_ring *timr, struct tim_mem_bucket *bkt)
+{
+       uint16_t i;
+       struct tim_mem_entry *chunk;
+       struct tim_mem_entry *wrk;
+       void *grp_addr;
+       chunk = (struct tim_mem_entry *)bkt->first_chunk;
+       timvf_log_info("Software traversing bucket.");
+       /* Only one sw thread can be here. */
+       while (chunk) {
+               for (i = 0; i < nb_chunk_slots && bkt->nb_entry; i++) {
+                       wrk = chunk + i;
+                       if (!wrk->wqe)
+                               continue;
+                       grp_addr = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP,
+                                       (uint8_t)((wrk->w0 >> 34) & 0xFF), 2);
+                       timvf_store_pair(wrk->w0, wrk->wqe, grp_addr);
+                       wrk->wqe = 0;
+                       bkt->nb_entry--;
+               }
+               wrk = (struct tim_mem_entry *)((chunk + nb_chunk_slots)->w0);
+               rte_mempool_put(timr->meta.chunk_pool, chunk);
+               chunk = (struct tim_mem_entry *)wrk;
+       }
+}
+#endif
+
+static inline __hot void
+timr_clr_bkt_full(struct timvf_ring *timr, struct tim_mem_bucket *bkt)
+{
+       struct tim_mem_entry *chunk;
+       struct tim_mem_entry *pnext;
+       chunk = (struct tim_mem_entry *)bkt->first_chunk;
+
+       while (chunk) {
+               pnext = (struct tim_mem_entry *)((chunk + nb_chunk_slots)->w0);
+               rte_mempool_put(timr->meta.chunk_pool, chunk);
+               chunk = pnext;
+       }
+}
+
 /* Burst mode functions */
 static inline int __hot
 timvf_add_entry_brst(struct timvf_ring *timr, const uint16_t rel_bkt,
@@ -205,6 +265,20 @@ timvf_add_entry_brst(struct timvf_ring *timr, const uint16_t rel_bkt,
                goto __retry;
        }
 
+       /* Check for bsk & do software traversal. */
+#ifdef RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL
+       if (unlikely(timr_bkt_get_bsk(lock_sema))) {
+               lock_sema = timr_bkt_set_sbt(bkt);
+               if (unlikely(timr_bkt_get_sbt(lock_sema))) {
+                       /* Should never hit. */
+                       goto __retry;
+               }
+               timvf_sw_trav(timr, bkt);
+               timr_bkt_clr_bsk(bkt);
+               goto __retry;
+       }
+#endif
+
        chunk_remainder = timr_bkt_fetch_rem(lock_sema);
        rem = chunk_remainder - nb_timers;
        if (rem < 0) {
@@ -342,6 +416,18 @@ timvf_add_entry_sp(struct timvf_ring *timr, const uint32_t rel_bkt,
        if (unlikely(timr_bkt_get_shbt(lock_sema)))
                goto __retry;
 
+#ifdef RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL
+       if (unlikely(timr_bkt_get_bsk(lock_sema))) {
+               lock_sema = timr_bkt_set_sbt(bkt);
+               if (unlikely(timr_bkt_get_sbt(lock_sema))) {
+                       /* Should never hit. */
+                       goto __retry;
+               }
+               timvf_sw_trav(timr, bkt);
+               timr_bkt_clr_bsk(bkt);
+               goto __retry;
+       }
+#endif
        /* Insert the work. */
        rem = timr_bkt_fetch_rem(lock_sema);
 
@@ -430,6 +516,25 @@ timvf_add_entry_mp(struct timvf_ring *timr, const uint32_t rel_bkt,
                }
 
                RTE_SET_USED(lock_cnt);
+#ifdef RTE_PMD_OCTEONTX_EVENT_TIMER_SW_TRAVERSAL
+               lock_cnt = (uint8_t)
+                       ((lock_sema >> TIM_BUCKET_W1_S_LOCK) &
+                        TIM_BUCKET_W1_M_LOCK);
+
+               if (unlikely(!lock_cnt && timr_bkt_get_bsk(lock_sema))) {
+                       /* Only first thread processes the bucket.*/
+                       lock_sema = timr_bkt_set_sbt(bkt);
+                       if (unlikely(timr_bkt_get_sbt(lock_sema))) {
+                               timr_bkt_dec_lock(bkt);
+                               goto __retry;
+                       }
+                       timvf_sw_trav(timr, bkt);
+                       timr_bkt_clr_bsk(bkt);
+                       timr_bkt_dec_lock(bkt);
+                       continue;
+               }
+#endif
+
                /* Insert the work. */
                rem = timr_bkt_fetch_rem(lock_sema);
 
-- 
2.16.1

Reply via email to