Signed-off-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>
---
 app/test-eventdev/test_pipeline_common.h |  80 +++++++
 app/test-eventdev/test_pipeline_queue.c  | 367 ++++++++++++++++++++++++++++++-
 2 files changed, 446 insertions(+), 1 deletion(-)

diff --git a/app/test-eventdev/test_pipeline_common.h b/app/test-eventdev/test_pipeline_common.h
index 26d265a3d..009b20a7d 100644
--- a/app/test-eventdev/test_pipeline_common.h
+++ b/app/test-eventdev/test_pipeline_common.h
@@ -78,6 +78,86 @@ struct test_pipeline {
        uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
 } __rte_cache_aligned;
 
+#define BURST_SIZE 16
+
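+/* Turn a just-dequeued event into a FORWARD op carrying the requested
+ * scheduling type; the caller updates queue_id to point at the next stage.
+ */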
+static __rte_always_inline void
+pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
+{
+       ev->event_type = RTE_EVENT_TYPE_CPU;
+       ev->op = RTE_EVENT_OP_FORWARD;
+       ev->sched_type = sched;
+}
+
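+/* Enqueue helpers: retry until the event device has accepted the event(s). */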
+static __rte_always_inline void
+pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
+               struct rte_event *ev)
+{
+       while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+               rte_pause();
+}
+
+static __rte_always_inline void
+pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
+               struct rte_event *ev, const uint16_t nb_rx)
+{
+       uint16_t enq;
+
+       enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+       while (enq < nb_rx) {
+               enq += rte_event_enqueue_burst(dev, port,
+                                               ev + enq, nb_rx - enq);
+       }
+}
+
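+/* Transmit one packet on the port recorded in the mbuf (Tx queue 0),
+ * retrying until the PMD accepts it. Used when the Tx queue can be shared
+ * by multiple workers without locking.
+ */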
+static __rte_always_inline void
+pipeline_tx_pkt_safe(struct rte_mbuf *mbuf)
+{
+       while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
+               rte_pause();
+}
+
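+/* Tx helpers for ethdevs whose Tx path is not multi-thread safe: access to
+ * Tx queue 0 is serialized with a per-port spinlock, either transmitting the
+ * packet immediately (pipeline_tx_pkt_unsafe) or buffering it for a later
+ * flush (pipeline_tx_unsafe_burst).
+ */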
+static __rte_always_inline void
+pipeline_tx_pkt_unsafe(struct rte_mbuf *mbuf, struct test_pipeline *t)
+{
+       rte_spinlock_t *lk = &t->tx_lk[mbuf->port];
+
+       rte_spinlock_lock(lk);
+       pipeline_tx_pkt_safe(mbuf);
+       rte_spinlock_unlock(lk);
+}
+
+static __rte_always_inline void
+pipeline_tx_unsafe_burst(struct rte_mbuf *mbuf, struct test_pipeline *t)
+{
+       uint16_t port = mbuf->port;
+       rte_spinlock_t *lk = &t->tx_lk[port];
+
+       rte_spinlock_lock(lk);
+       rte_eth_tx_buffer(port, 0, t->tx_buf[port], mbuf);
+       rte_spinlock_unlock(lk);
+}
+
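+/* Flush, under the per-port lock, any packets buffered by
+ * pipeline_tx_unsafe_burst().
+ */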
+static __rte_always_inline void
+pipeline_tx_flush(struct test_pipeline *t, const uint8_t nb_ports)
+{
+       int i;
+       rte_spinlock_t *lk;
+
+       for (i = 0; i < nb_ports; i++) {
+               lk = &t->tx_lk[i];
+
+               rte_spinlock_lock(lk);
+               rte_eth_tx_buffer_flush(i, 0, t->tx_buf[i]);
+               rte_spinlock_unlock(lk);
+       }
+}
+
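+/* One event port is needed per active worker lcore. */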
+static inline int
+pipeline_nb_event_ports(struct evt_options *opt)
+{
+       return evt_nr_active_lcores(opt->wlcores);
+}
+
 int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
 int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
 int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 851027cb7..f89adc4b4 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -42,10 +42,375 @@ pipeline_queue_nb_event_queues(struct evt_options *opt)
        return (eth_count * opt->nb_stages) + eth_count;
 }
 
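+/* Single-stage workers: dequeue one event at a time. An event dequeued from
+ * an atomic queue is transmitted; events of any other scheduling type are
+ * forwarded to the next queue as atomic so that Tx stays ordered.
+ */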
+static int
+pipeline_queue_worker_single_stage_safe(void *arg)
+{
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       struct rte_event ev;
+
+       while (t->done == false) {
+               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+               if (!event) {
+                       rte_pause();
+                       continue;
+               }
+
+               if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                       pipeline_tx_pkt_safe(ev.mbuf);
+                       w->processed_pkts++;
+               } else {
+                       ev.queue_id++;
+                       pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+                       pipeline_event_enqueue(dev, port, &ev);
+               }
+       }
+
+       return 0;
+}
+
+static int
+pipeline_queue_worker_single_stage_unsafe(void *arg)
+{
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       struct rte_event ev;
+
+       while (t->done == false) {
+               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+               if (!event) {
+                       rte_pause();
+                       continue;
+               }
+
+               if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                       pipeline_tx_pkt_unsafe(ev.mbuf, t);
+                       w->processed_pkts++;
+               } else {
+                       ev.queue_id++;
+                       pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+                       pipeline_event_enqueue(dev, port, &ev);
+               }
+       }
+
+       return 0;
+}
+
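+/* Burst variants: dequeue up to BURST_SIZE events at a time, prefetch the
+ * next event's mbuf, mark transmitted events as RTE_EVENT_OP_RELEASE and
+ * enqueue the whole batch back in a single call.
+ */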
+static int
+pipeline_queue_worker_single_stage_burst_safe(void *arg)
+{
+       int i;
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       /* +1 so the ev[i + 1] prefetch below never reads past the array */
+       struct rte_event ev[BURST_SIZE + 1];
+
+       while (t->done == false) {
+               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+                               BURST_SIZE, 0);
+
+               if (!nb_rx) {
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++) {
+                       rte_prefetch0(ev[i + 1].mbuf);
+                       if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                               pipeline_tx_pkt_safe(ev[i].mbuf);
+                               ev[i].op = RTE_EVENT_OP_RELEASE;
+                               w->processed_pkts++;
+                       } else {
+                               ev[i].queue_id++;
+                               pipeline_fwd_event(&ev[i],
+                                               RTE_SCHED_TYPE_ATOMIC);
+                       }
+               }
+
+               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+       }
+
+       return 0;
+}
+
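+/* The mt-unsafe burst worker buffers Tx packets per port under the per-port
+ * spinlock and flushes all Tx buffers whenever a dequeue returns no events.
+ */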
+static int
+pipeline_queue_worker_single_stage_burst_unsafe(void *arg)
+{
+       int i;
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       /* +1 so the ev[i + 1] prefetch below never reads past the array */
+       struct rte_event ev[BURST_SIZE + 1];
+       const uint16_t nb_ports = rte_eth_dev_count();
+
+       while (t->done == false) {
+               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+                               BURST_SIZE, 0);
+
+               if (!nb_rx) {
+                       pipeline_tx_flush(t, nb_ports);
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++) {
+                       rte_prefetch0(ev[i + 1].mbuf);
+                       if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                               pipeline_tx_unsafe_burst(ev[i].mbuf, t);
+                               ev[i].op = RTE_EVENT_OP_RELEASE;
+                               w->processed_pkts++;
+                       } else {
+                               ev[i].queue_id++;
+                               pipeline_fwd_event(&ev[i],
+                                               RTE_SCHED_TYPE_ATOMIC);
+                       }
+               }
+
+               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+       }
+
+       return 0;
+}
+
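+/* Multi-stage workers: each pipeline has nb_stages worker queues followed by
+ * a final Tx queue, and cq_id identifies the current stage. Once an event
+ * reaches the last worker queue it is transmitted if it is atomic, otherwise
+ * it is pushed towards the Tx queue as atomic; earlier stages forward the
+ * event to the next queue using the configured per-stage scheduling type.
+ */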
+static int
+pipeline_queue_worker_multi_stage_safe(void *arg)
+{
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       const uint8_t last_queue = t->opt->nb_stages - 1;
+       const uint8_t nb_stages = t->opt->nb_stages + 1;
+       uint8_t *const sched_type_list = &t->sched_type_list[0];
+       uint8_t cq_id;
+       struct rte_event ev;
+
+       while (t->done == false) {
+               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+               if (!event) {
+                       rte_pause();
+                       continue;
+               }
+
+               cq_id = ev.queue_id % nb_stages;
+
+               if (cq_id >= last_queue) {
+                       if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                               pipeline_tx_pkt_safe(ev.mbuf);
+                               w->processed_pkts++;
+                               continue;
+                       }
+                       ev.queue_id += (cq_id == last_queue) ? 1 : 0;
+                       pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+               } else {
+                       ev.queue_id++;
+                       pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+               }
+
+               pipeline_event_enqueue(dev, port, &ev);
+       }
+       return 0;
+}
+
+static int
+pipeline_queue_worker_multi_stage_unsafe(void *arg)
+{
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       const uint8_t last_queue = t->opt->nb_stages - 1;
+       const uint8_t nb_stages = t->opt->nb_stages + 1;
+       uint8_t *const sched_type_list = &t->sched_type_list[0];
+       uint8_t cq_id;
+       struct rte_event ev;
+
+       while (t->done == false) {
+               uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+               if (!event) {
+                       rte_pause();
+                       continue;
+               }
+
+               cq_id = ev.queue_id % nb_stages;
+
+               if (cq_id >= last_queue) {
+                       if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                               pipeline_tx_pkt_unsafe(ev.mbuf, t);
+                               w->processed_pkts++;
+                               continue;
+                       }
+                       ev.queue_id += (cq_id == last_queue) ? 1 : 0;
+                       pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+               } else {
+                       ev.queue_id++;
+                       pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+               }
+
+               pipeline_event_enqueue(dev, port, &ev);
+       }
+       return 0;
+}
+
+static int
+pipeline_queue_worker_multi_stage_burst_safe(void *arg)
+{
+       int i;
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       uint8_t *const sched_type_list = &t->sched_type_list[0];
+       const uint8_t last_queue = t->opt->nb_stages - 1;
+       const uint8_t nb_stages = t->opt->nb_stages + 1;
+       uint8_t cq_id;
+       struct rte_event ev[BURST_SIZE + 1];
+
+       while (t->done == false) {
+               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+                               BURST_SIZE, 0);
+
+               if (!nb_rx) {
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++) {
+                       rte_prefetch0(ev[i + 1].mbuf);
+                       cq_id = ev[i].queue_id % nb_stages;
+
+                       if (cq_id >= last_queue) {
+                               if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                                       pipeline_tx_pkt_safe(ev[i].mbuf);
+                                       ev[i].op = RTE_EVENT_OP_RELEASE;
+                                       w->processed_pkts++;
+                                       continue;
+                               }
+
+                               ev[i].queue_id += (cq_id == last_queue) ? 1 : 0;
+                               pipeline_fwd_event(&ev[i],
+                                               RTE_SCHED_TYPE_ATOMIC);
+                       } else {
+                               ev[i].queue_id++;
+                               pipeline_fwd_event(&ev[i],
+                                               sched_type_list[cq_id]);
+                       }
+
+               }
+
+               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+       }
+       return 0;
+}
+
+static int
+pipeline_queue_worker_multi_stage_burst_unsafe(void *arg)
+{
+       int i;
+       struct worker_data *w  = arg;
+       struct test_pipeline *t = w->t;
+       const uint8_t dev = w->dev_id;
+       const uint8_t port = w->port_id;
+       uint8_t *const sched_type_list = &t->sched_type_list[0];
+       const uint8_t last_queue = t->opt->nb_stages - 1;
+       const uint8_t nb_stages = t->opt->nb_stages + 1;
+       uint8_t cq_id;
+       struct rte_event ev[BURST_SIZE + 1];
+       const uint16_t nb_ports = rte_eth_dev_count();
+
+       while (t->done == false) {
+               uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+                               BURST_SIZE, 0);
+
+               if (!nb_rx) {
+                       pipeline_tx_flush(t, nb_ports);
+                       rte_pause();
+                       continue;
+               }
+
+               for (i = 0; i < nb_rx; i++) {
+                       rte_prefetch0(ev[i + 1].mbuf);
+                       cq_id = ev[i].queue_id % nb_stages;
+
+                       if (cq_id >= last_queue) {
+                               if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                                       pipeline_tx_unsafe_burst(ev[i].mbuf, t);
+                                       ev[i].op = RTE_EVENT_OP_RELEASE;
+                                       w->processed_pkts++;
+                                       continue;
+                               }
+
+                               ev[i].queue_id += (cq_id == last_queue) ? 1 : 0;
+                               pipeline_fwd_event(&ev[i],
+                                               RTE_SCHED_TYPE_ATOMIC);
+                       } else {
+                               ev[i].queue_id++;
+                               pipeline_fwd_event(&ev[i],
+                                               sched_type_list[cq_id]);
+                       }
+               }
+
+               pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+       }
+       return 0;
+}
+
 static int
 worker_wrapper(void *arg)
 {
-       RTE_SET_USED(arg);
+       struct worker_data *w  = arg;
+       struct evt_options *opt = w->t->opt;
+       const bool burst = evt_has_burst_mode(w->dev_id);
+       const bool mt_safe = !w->t->mt_unsafe;
+       const uint8_t nb_stages = opt->nb_stages;
+       RTE_SET_USED(opt);
+
+       /* allow compiler to optimize */
+       if (nb_stages == 1) {
+               if (!burst && mt_safe)
+                       return pipeline_queue_worker_single_stage_safe(arg);
+               else if (!burst && !mt_safe)
+                       return pipeline_queue_worker_single_stage_unsafe(
+                                       arg);
+               else if (burst && mt_safe)
+                       return pipeline_queue_worker_single_stage_burst_safe(
+                                       arg);
+               else if (burst && !mt_safe)
+                       return pipeline_queue_worker_single_stage_burst_unsafe(
+                                       arg);
+       } else {
+               if (!burst && mt_safe)
+                       return pipeline_queue_worker_multi_stage_safe(arg);
+               else if (!burst && !mt_safe)
+                       return pipeline_queue_worker_multi_stage_unsafe(arg);
+               else if (burst && mt_safe)
+                       return pipeline_queue_worker_multi_stage_burst_safe(
+                                       arg);
+               else if (burst && !mt_safe)
+                       return pipeline_queue_worker_multi_stage_burst_unsafe(
+                                       arg);
+
+       }
        rte_panic("invalid worker\n");
 }
 
-- 
2.14.1
