The scheduling mode of each event queue depends on the scheduling mode of the
corresponding application stage. Configure the event queues taking this into
account.

Signed-off-by: Anoob Joseph <ano...@marvell.com>
Signed-off-by: Lukasz Bartosik <lbarto...@marvell.com>
---
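
For context, a minimal sketch (not part of this patch) of how the per-queue
schedule type is selected after this change. The nb_eventqueue count, the
atomic final Tx queue and the ext_params.sched_type field come from the hunks
below; the helper name and the queue_conf array are illustrative only:

	#include <rte_eventdev.h>

	/*
	 * Illustrative sketch: assign schedule types for all event queues,
	 * assuming nb_eventqueue includes the final eth Tx queue and
	 * app_sched_type holds em_conf->ext_params.sched_type.
	 */
	static void
	set_queue_sched_types(struct rte_event_queue_conf *queue_conf,
			      int nb_eventqueue, uint8_t app_sched_type)
	{
		int j;

		for (j = 0; j < nb_eventqueue; j++) {
			if (j == nb_eventqueue - 1)
				/* Last queue feeds eth Tx; keep it atomic */
				queue_conf[j].schedule_type =
						RTE_SCHED_TYPE_ATOMIC;
			else
				/* Worker stages use the configured type */
				queue_conf[j].schedule_type = app_sched_type;
		}
	}
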
 lib/librte_eventdev/rte_eventmode_helper.c         | 24 ++++++++++++++++++++--
 .../rte_eventmode_helper_internal.h                |  8 ++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/lib/librte_eventdev/rte_eventmode_helper.c b/lib/librte_eventdev/rte_eventmode_helper.c
index ec0be44..30bb357 100644
--- a/lib/librte_eventdev/rte_eventmode_helper.c
+++ b/lib/librte_eventdev/rte_eventmode_helper.c
@@ -85,6 +85,8 @@ em_parse_transfer_mode(struct rte_eventmode_helper_conf *conf,
 static void
 em_initialize_helper_conf(struct rte_eventmode_helper_conf *conf)
 {
+       struct eventmode_conf *em_conf = NULL;
+
        /* Set default conf */
 
        /* Packet transfer mode: poll */
@@ -92,6 +94,13 @@ em_initialize_helper_conf(struct rte_eventmode_helper_conf *conf)
 
        /* Keep all ethernet ports enabled by default */
        conf->eth_portmask = -1;
+
+       /* Get eventmode conf */
+       em_conf = (struct eventmode_conf *)(conf->mode_params);
+
+       /* Schedule type: ordered */
+       /* FIXME */
+       em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
 }
 
 struct rte_eventmode_helper_conf * __rte_experimental
@@ -233,8 +242,19 @@ rte_eventmode_helper_initialize_eventdev(struct eventmode_conf *em_conf)
                        eventq_conf.event_queue_cfg =
                                        eventdev_config->ev_queue_mode;
 
-                       /* Set schedule type as ATOMIC */
-                       eventq_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+                       /*
+                        * All queues need to be set with sched_type as
+                        * schedule type for the application stage. One queue
+                        * would be reserved for the final eth tx stage. This
+                        * will be an atomic queue.
+                        */
+                       if (j == nb_eventqueue-1) {
+                               eventq_conf.schedule_type =
+                                       RTE_SCHED_TYPE_ATOMIC;
+                       } else {
+                               eventq_conf.schedule_type =
+                                       em_conf->ext_params.sched_type;
+                       }
 
                        /* Set max atomic flows to 1024 */
                        eventq_conf.nb_atomic_flows = 1024;
diff --git a/lib/librte_eventdev/rte_eventmode_helper_internal.h b/lib/librte_eventdev/rte_eventmode_helper_internal.h
index ee41833..2a6cd90 100644
--- a/lib/librte_eventdev/rte_eventmode_helper_internal.h
+++ b/lib/librte_eventdev/rte_eventmode_helper_internal.h
@@ -61,6 +61,14 @@ struct eventmode_conf {
        struct rte_eventmode_helper_event_link_info
                        link[EVENT_MODE_MAX_LCORE_LINKS];
                /**< Per link conf */
+       union {
+               struct {
+                       uint64_t sched_type                     : 2;
+               /**< Schedule type */
+               };
+               uint64_t u64;
+       } ext_params;
+               /**< 64 bit field to specify extended params */
 };
 
 #endif /* _RTE_EVENTMODE_HELPER_INTERNAL_H_ */
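
An illustrative sketch of how a caller might set the stage schedule type
through the new ext_params field; the setter name and the validation are
assumptions, not part of this patch:

	#include <errno.h>
	#include <rte_eventdev.h>

	/* Hypothetical setter for the application stage schedule type */
	static int
	em_set_sched_type(struct eventmode_conf *em_conf, uint8_t sched_type)
	{
		if (sched_type != RTE_SCHED_TYPE_ORDERED &&
		    sched_type != RTE_SCHED_TYPE_ATOMIC &&
		    sched_type != RTE_SCHED_TYPE_PARALLEL)
			return -EINVAL;

		/* Stored in the 2-bit field added above */
		em_conf->ext_params.sched_type = sched_type;
		return 0;
	}
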
-- 
2.7.4
