There is a possibility of deadlock in this API, as the same spinlock is acquired in a nested manner.
This patch removes the nested lock acquisition. Signed-off-by: Naga Harish K S V <s.v.naga.haris...@intel.com> --- lib/timer/rte_timer.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/timer/rte_timer.c b/lib/timer/rte_timer.c index 9994813d0d..cfbc8cb028 100644 --- a/lib/timer/rte_timer.c +++ b/lib/timer/rte_timer.c @@ -987,21 +987,16 @@ rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores, walk_lcore = walk_lcores[i]; priv_timer = &timer_data->priv_timer[walk_lcore]; - rte_spinlock_lock(&priv_timer->list_lock); - for (tim = priv_timer->pending_head.sl_next[0]; tim != NULL; tim = next_tim) { next_tim = tim->sl_next[0]; - /* Call timer_stop with lock held */ - __rte_timer_stop(tim, 1, timer_data); + __rte_timer_stop(tim, 0, timer_data); if (f) f(tim, f_arg); } - - rte_spinlock_unlock(&priv_timer->list_lock); } return 0; -- 2.25.1