Add a configurable option to create all queues as all-types queues
(RTE_EVENT_QUEUE_CFG_ALL_TYPES), gated on the event device capability
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.

This can be enabled by supplying '-a' as a cmdline argument.

Signed-off-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>
---
 examples/eventdev_pipeline_sw_pmd/main.c           |   7 +-
 .../eventdev_pipeline_sw_pmd/pipeline_common.h     |   1 +
 .../pipeline_worker_generic.c                      |   5 +
 .../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c  | 134 +++++++++++++++++++--
 4 files changed, 139 insertions(+), 8 deletions(-)

diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 3be981c15..289f7204d 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -149,6 +149,7 @@ static struct option long_options[] = {
        {"parallel", no_argument, 0, 'p'},
        {"ordered", no_argument, 0, 'o'},
        {"quiet", no_argument, 0, 'q'},
+       {"use-atq", no_argument, 0, 'a'},
        {"dump", no_argument, 0, 'D'},
        {0, 0, 0, 0}
 };
@@ -172,6 +173,7 @@ usage(void)
                "  -o, --ordered                Use ordered scheduling\n"
                "  -p, --parallel               Use parallel scheduling\n"
                "  -q, --quiet                  Minimize printed output\n"
+               "  -a, --use-atq                Use all type queues\n"
                "  -D, --dump                   Print detailed statistics before exit"
                "\n";
        fprintf(stderr, "%s", usage_str);
@@ -192,7 +194,7 @@ parse_app_args(int argc, char **argv)
        int i;
 
        for (;;) {
-               c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
+               c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:paoPqDW:",
                                long_options, &option_index);
                if (c == -1)
                        break;
@@ -225,6 +227,9 @@ parse_app_args(int argc, char **argv)
                case 'p':
                        cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
                        break;
+               case 'a':
+                       cdata.all_type_queues = 1;
+                       break;
                case 'q':
                        cdata.quiet = 1;
                        break;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index 0b27d1eb0..62755f6d0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -106,6 +106,7 @@ struct config_data {
        int quiet;
        int dump_dev;
        int dump_dev_signal;
+       int all_type_queues;
        unsigned int num_stages;
        unsigned int worker_cq_depth;
        unsigned int rx_stride;
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
index 5998aae95..908d64c87 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_generic.c
@@ -525,6 +525,11 @@ generic_opt_check(void)
        memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
        rte_event_dev_info_get(0, &eventdev_info);
 
+       if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+                               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+               rte_exit(EXIT_FAILURE,
+                               "Event dev doesn't support all type queues\n");
+
        for (i = 0; i < rte_eth_dev_count(); i++) {
                ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
                if (ret)
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
index a824f1f49..e25a06027 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -119,6 +119,51 @@ worker_do_tx(void *arg)
        return 0;
 }
 
+static int
+worker_do_tx_atq(void *arg)
+{
+       struct rte_event ev;
+
+       struct worker_data *data = (struct worker_data *)arg;
+       const uint8_t dev = data->dev_id;
+       const uint8_t port = data->port_id;
+       const uint8_t lst_qid = cdata.num_stages - 1;
+       size_t fwd = 0, received = 0, tx = 0;
+
+       while (!fdata->done) {
+
+               if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+                       rte_pause();
+                       continue;
+               }
+
+               received++;
+               const uint8_t cq_id = ev.queue_id % cdata.num_stages;
+
+               if (cq_id == lst_qid) {
+                       if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+                               worker_tx_pkt(ev.mbuf);
+                               tx++;
+                               continue;
+                       }
+                       worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+               } else {
+                       ev.queue_id = cdata.next_qid[ev.queue_id];
+                       worker_fwd_event(&ev, cdata.queue_type);
+               }
+               work(ev.mbuf);
+
+               worker_event_enqueue(dev, port, &ev);
+               fwd++;
+       }
+
+       if (!cdata.quiet)
+               printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+                               rte_lcore_id(), received, fwd, tx);
+
+       return 0;
+}
+
 static int
 worker_do_tx_burst(void *arg)
 {
@@ -178,6 +223,61 @@ worker_do_tx_burst(void *arg)
        return 0;
 }
 
+static int
+worker_do_tx_burst_atq(void *arg)
+{
+       struct rte_event ev[BATCH_SIZE];
+
+       struct worker_data *data = (struct worker_data *)arg;
+       uint8_t dev = data->dev_id;
+       uint8_t port = data->port_id;
+       uint8_t lst_qid = cdata.num_stages - 1;
+       size_t fwd = 0, received = 0, tx = 0;
+
+       while (!fdata->done) {
+               uint16_t i;
+
+               const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
+                               ev, BATCH_SIZE, 0);
+
+               if (nb_rx == 0) {
+                       rte_pause();
+                       continue;
+               }
+               received += nb_rx;
+
+               for (i = 0; i < nb_rx; i++) {
+                       const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;
+
+                       if (cq_id == lst_qid) {
+                               if (ev[i].sched_type ==
+                                               RTE_SCHED_TYPE_ATOMIC) {
+                                       worker_tx_pkt(ev[i].mbuf);
+                                       tx++;
+                                       ev[i].op = RTE_EVENT_OP_RELEASE;
+                                       continue;
+                               }
+                               worker_fwd_event(&ev[i],
+                                               RTE_SCHED_TYPE_ATOMIC);
+                       } else {
+                               ev[i].queue_id = cdata.next_qid[
+                                       ev[i].queue_id];
+                               worker_fwd_event(&ev[i],
+                                               cdata.queue_type);
+                       }
+               }
+
+               worker_event_enqueue_burst(dev, port, ev, nb_rx);
+               fwd += nb_rx;
+       }
+
+       if (!cdata.quiet)
+               printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+                               rte_lcore_id(), received, fwd, tx);
+
+       return 0;
+}
+
 static int
 setup_eventdev_w(struct prod_data *prod_data,
                struct cons_data *cons_data,
@@ -186,10 +286,12 @@ setup_eventdev_w(struct prod_data *prod_data,
        RTE_SET_USED(prod_data);
        RTE_SET_USED(cons_data);
        uint8_t i;
+       const uint8_t atq = cdata.all_type_queues ? 1 : 0;
        const uint8_t dev_id = 0;
        const uint8_t nb_ports = cdata.num_workers;
        uint8_t nb_slots = 0;
        uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
+       nb_queues +=  atq ? 0 : rte_eth_dev_count();
 
        struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
@@ -241,12 +343,19 @@ setup_eventdev_w(struct prod_data *prod_data,
        printf("  Stages:\n");
        for (i = 0; i < nb_queues; i++) {
 
-               uint8_t slot;
+               if (atq) {
+
+                       nb_slots = cdata.num_stages;
+                       wkr_q_conf.event_queue_cfg =
+                               RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+               } else {
+                       uint8_t slot;
 
-               nb_slots = cdata.num_stages + 1;
-               slot = i % nb_slots;
-               wkr_q_conf.schedule_type = slot == cdata.num_stages ?
-                       RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+                       nb_slots = cdata.num_stages + 1;
+                       slot = i % nb_slots;
+                       wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+                               RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+               }
 
                if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
                        printf("%d: error creating qid %d\n", __LINE__, i);
@@ -464,6 +573,11 @@ opt_check(void)
        memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
        rte_event_dev_info_get(0, &eventdev_info);
 
+       if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
+                               RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
+               rte_exit(EXIT_FAILURE,
+                               "Event dev doesn't support all type queues\n");
+
        for (i = 0; i < rte_eth_dev_count(); i++) {
                ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
                if (ret)
@@ -494,9 +608,15 @@ opt_check(void)
 void
 set_worker_tx_setup_data(struct setup_data *caps, bool burst)
 {
-       if (burst)
+       uint8_t atq = cdata.all_type_queues ? 1 : 0;
+
+       if (burst && atq)
+               caps->worker_loop = worker_do_tx_burst_atq;
+       if (burst && !atq)
                caps->worker_loop = worker_do_tx_burst;
-       if (!burst)
+       if (!burst && atq)
+               caps->worker_loop = worker_do_tx_atq;
+       if (!burst && !atq)
                caps->worker_loop = worker_do_tx;
 
        caps->opt_check = opt_check;
-- 
2.14.1

Reply via email to