Add helper functions to launch n worker cores and wait for them to
complete the operation, with deadlock detection: if no forward progress
is seen for 10 seconds, the event device state is dumped and the test
fails.

Signed-off-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>
---
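Note for reviewers (below the ---, so not part of the commit message):
a minimal usage sketch of the new helpers. worker_fn(), nb_ports and
the surrounding test body are hypothetical, for illustration only; real
tests supply their own master/slave worker functions:

    /* hypothetical worker: drain events on this port and count down
     * the shared total; args arrive via struct test_core_param */
    static int
    worker_fn(void *arg)
    {
            struct test_core_param *param = arg;
            struct rte_event ev;

            while (rte_atomic32_read(param->total_events) > 0) {
                    if (!rte_event_dequeue_burst(evdev, param->port,
                                    &ev, 1, param->dequeue_tmo_ticks))
                            continue;
                    rte_atomic32_sub(param->total_events, 1);
            }
            return 0;
    }

    /* one lcore per port; master and slaves run the same function
     * here, so pass worker_fn for both */
    ret = launch_workers_and_wait(worker_fn, worker_fn, total_events,
                    nb_ports, RTE_SCHED_TYPE_ATOMIC);
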
 app/test/test_eventdev_octeontx.c | 88 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/app/test/test_eventdev_octeontx.c b/app/test/test_eventdev_octeontx.c
index 4a63d15..1b2a5e1 100644
--- a/app/test/test_eventdev_octeontx.c
+++ b/app/test/test_eventdev_octeontx.c
@@ -63,6 +63,12 @@ struct event_attr {
        uint8_t port;
 };
 
+struct test_core_param {
+       rte_atomic32_t *total_events;
+       uint64_t dequeue_tmo_ticks;
+       uint8_t port;
+       uint8_t sched_type;
+};
 
 static uint32_t seqn_list_index;
 static int seqn_list[NUM_PACKETS];
@@ -525,6 +531,88 @@ test_multi_queue_priority(void)
        return consume_events(0, max_evts_roundoff, validate_queue_priority);
 }
 
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+       uint64_t cycles, print_cycles;
+
+       print_cycles = cycles = rte_get_timer_cycles();
+       while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+               uint64_t new_cycles = rte_get_timer_cycles();
+
+               if (new_cycles - print_cycles > rte_get_timer_hz()) {
+                       printf("\r%s: events %d\n", __func__,
+                               rte_atomic32_read(count));
+                       print_cycles = new_cycles;
+               }
+               if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+                       printf("%s: No schedules for seconds, deadlock (%d)\n",
+                               __func__,
+                               rte_atomic32_read(count));
+                       rte_event_dev_dump(evdev, stdout);
+                       cycles = new_cycles;
+                       return TEST_FAILED;
+               }
+       }
+       rte_eal_mp_wait_lcore();
+       return TEST_SUCCESS;
+}
+
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+                       int (*slave_workers)(void *), uint32_t total_events,
+                       uint8_t nb_workers, uint8_t sched_type)
+{
+       uint8_t port = 0;
+       int w_lcore;
+       int ret;
+       struct test_core_param *param;
+       rte_atomic32_t atomic_total_events;
+       uint64_t dequeue_tmo_ticks;
+
+       if (!nb_workers)
+               return 0;
+
+       rte_atomic32_set(&atomic_total_events, total_events);
+       seqn_list_init();
+
+       ret = rte_event_dequeue_timeout_ticks(evdev,
+               rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
+       if (ret)
+               return TEST_FAILED;
+
+       param = malloc(sizeof(struct test_core_param) * nb_workers);
+       if (!param)
+               return TEST_FAILED;
+
+       param[0].total_events = &atomic_total_events;
+       param[0].sched_type = sched_type;
+       param[0].port = 0;
+       param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+       rte_smp_wmb();
+
+       w_lcore = rte_get_next_lcore(
+                       /* start core */ -1,
+                       /* skip master */ 1,
+                       /* wrap */ 0);
+       rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+       for (port = 1; port < nb_workers; port++) {
+               param[port].total_events = &atomic_total_events;
+               param[port].sched_type = sched_type;
+               param[port].port = port;
+               param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+               rte_smp_wmb();
+               w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+               rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+       }
+
+       ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+       free(param);
+       return ret;
+}
+
 
 static struct unit_test_suite eventdev_octeontx_testsuite  = {
        .suite_name = "eventdev octeontx unit test suite",
-- 
2.5.5
