> -----Original Message----- > From: Jerin Jacob [mailto:jerin.ja...@caviumnetworks.com] > Sent: Sunday, May 28, 2017 2:59 PM > To: dev@dpdk.org > Cc: Richardson, Bruce <bruce.richard...@intel.com>; Van Haaren, Harry > <harry.van.haa...@intel.com>; hemant.agra...@nxp.com; Eads, Gage > <gage.e...@intel.com>; nipun.gu...@nxp.com; Vangati, Narender > <narender.vang...@intel.com>; Rao, Nikhil <nikhil....@intel.com>; > gprathyu...@caviumnetworks.com; Jerin Jacob > <jerin.ja...@caviumnetworks.com> > Subject: [dpdk-dev] [PATCH 15/33] app/testeventdev: order: launch lcores > > The event producer and master lcore's test end and failure detection logic > are > common for the queue and all types queue test.Move them as the common > function. > > Signed-off-by: Jerin Jacob <jerin.ja...@caviumnetworks.com> > --- > app/test-eventdev/test_order_common.c | 114 > ++++++++++++++++++++++++++++++++++ > app/test-eventdev/test_order_common.h | 2 + > 2 files changed, 116 insertions(+) > > diff --git a/app/test-eventdev/test_order_common.c b/app/test- > eventdev/test_order_common.c > index 935c5a3fd..a7160f3dc 100644 > --- a/app/test-eventdev/test_order_common.c > +++ b/app/test-eventdev/test_order_common.c > @@ -41,6 +41,57 @@ order_test_result(struct evt_test *test, struct > evt_options *opt) > return t->result; > } > > +static inline int > +order_producer(void *arg) > +{ > + struct prod_data *p = arg; > + struct test_order *t = p->t; > + struct evt_options *opt = t->opt; > + const uint8_t dev_id = p->dev_id; > + const uint8_t port = p->port_id; > + struct rte_mempool *pool = t->pool; > + const uint64_t nb_pkts = t->nb_pkts; > + uint32_t *producer_flow_seq = t->producer_flow_seq; > + const uint32_t nb_flows = t->nb_flows; > + uint64_t count = 0; > + struct rte_mbuf *m; > + struct rte_event ev; > + > + if (opt->verbose_level > 1) > + printf("%s(): lcore %d dev_id %d port=%d queue=%d\n", > + __func__, rte_lcore_id(), dev_id, port, p->queue_id); > + > + ev.event = 0; > + ev.op = 
RTE_EVENT_OP_NEW; > + ev.queue_id = p->queue_id; > + ev.sched_type = RTE_SCHED_TYPE_ORDERED; > + ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; > + ev.event_type = RTE_EVENT_TYPE_CPU; > + ev.sub_event_type = 0; /* stage 0 */ > + > + while (count < nb_pkts && t->err == false) { > + m = rte_pktmbuf_alloc(pool); > + if (m == NULL) > + continue; > + > + const uint32_t flow = (uintptr_t)m % nb_flows; > + /* Maintain seq number per flow */ > + m->seqn = producer_flow_seq[flow]++; > + > + ev.flow_id = flow; > + ev.mbuf = m; > + > + while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) { > + if (t->err) > + break; > + rte_pause(); > + } > + > + count++; > + } > + return 0; > +} > + > int > order_opt_check(struct evt_options *opt) { @@ -185,6 +236,69 @@ > order_opt_dump(struct evt_options *opt) } > > int > +order_launch_lcores(struct evt_test *test, struct evt_options *opt, > + int (*worker)(void *)) > +{ > + int ret, lcore_id; > + struct test_order *t = evt_test_priv(test); > + > + int wkr_idx = 0; > + /* launch workers */ > + RTE_LCORE_FOREACH_SLAVE(lcore_id) { > + if (!(opt->wlcores[lcore_id])) > + continue; > + > + ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], > + lcore_id); > + if (ret) { > + evt_err("failed to launch worker %d", lcore_id); > + return ret; > + } > + wkr_idx++; > + } > + > + /* launch producer */ > + ret = rte_eal_remote_launch(order_producer, &t->prod, opt->plcore); > + if (ret) { > + evt_err("failed to launch order_producer %d", opt->plcore); > + return ret; > + } > + > + uint64_t cycles = rte_get_timer_cycles(); > + int64_t old_remining = -1;
s/remining/remaining/g — this misspelling also occurs in test_perf_common.c in patch 23.