> This patch introduces the task scheduler mechanism to enable dispatching
> tasks to other worker cores. Currently, there is only a local work
> queue for one graph to walk. We introduce a scheduler work queue in
> each worker core for dispatching tasks. The walk is performed on the
> scheduler work queue first, then on the local work queue.
>
> Signed-off-by: Haiyue Wang <haiyue.w...@intel.com>
> Signed-off-by: Cunming Liang <cunming.li...@intel.com>
> Signed-off-by: Zhirun Yan <zhirun....@intel.com>
> Acked-by: Jerin Jacob <jer...@marvell.com>
> Acked-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
> ---
>  lib/graph/rte_graph_model_mcore_dispatch.h | 44 ++++++++++++++++++++++
>  1 file changed, 44 insertions(+)
>
> diff --git a/lib/graph/rte_graph_model_mcore_dispatch.h b/lib/graph/rte_graph_model_mcore_dispatch.h
> index 6163f96c37..c78a3bbdf9 100644
> --- a/lib/graph/rte_graph_model_mcore_dispatch.h
> +++ b/lib/graph/rte_graph_model_mcore_dispatch.h
> @@ -83,6 +83,50 @@ __rte_experimental
>  int rte_graph_model_mcore_dispatch_node_lcore_affinity_set(const char *name,
>  							    unsigned int lcore_id);
>
> +/**
> + * Perform graph walk on the circular buffer, invoke the process function
> + * of the nodes and collect the stats.
> + *
> + * @param graph
> + *   Graph pointer returned from rte_graph_lookup function.
> + *
> + * @see rte_graph_lookup()
> + */
> +__rte_experimental
> +static inline void
> +rte_graph_walk_mcore_dispatch(struct rte_graph *graph)
> +{
> +	const rte_graph_off_t *cir_start = graph->cir_start;
> +	const rte_node_t mask = graph->cir_mask;
> +	uint32_t head = graph->head;
> +	struct rte_node *node;
> +
> +	RTE_ASSERT(graph->parent_id != RTE_GRAPH_ID_INVALID);
> +	if (graph->dispatch.wq != NULL)
> +		__rte_graph_mcore_dispatch_sched_wq_process(graph);
> +
> +	while (likely(head != graph->tail)) {
> +		node = (struct rte_node *)RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
> +
> +		/* Skip source nodes that are not bound to the current worker */
> +		if ((int32_t)head < 0 && node->dispatch.lcore_id != graph->dispatch.lcore_id)
> +			continue;
> +
> +		/* Schedule the node until all tasks/objs are done */
> +		if (node->dispatch.lcore_id != RTE_MAX_LCORE &&
> +		    graph->dispatch.lcore_id != node->dispatch.lcore_id &&
> +		    graph->dispatch.rq != NULL &&
> +		    __rte_graph_mcore_dispatch_sched_node_enqueue(node, graph->dispatch.rq))
> +			continue;
> +
> +		__rte_node_process(graph, node);
> +
> +		head = likely((int32_t)head > 0) ? head & mask : head;
> +	}
> +
> +	graph->tail = 0;
> +}
> +
>  #ifdef __cplusplus
>  }
>  #endif
> --
> 2.37.2
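
For readers following along: each call to the new walk drains the per-core
scheduler work queue first (the wq != NULL branch), then walks the local
circular buffer, skipping source nodes pinned to other cores. A minimal
worker-loop sketch of how this would be driven; graph_worker_thread is a
hypothetical name, and the per-lcore cloned graph is assumed to come from
the clone/bind APIs added elsewhere in this series:

    #include <rte_graph_worker.h>

    /* Hypothetical worker entry, matching lcore_function_t so it can be
     * launched with rte_eal_remote_launch(). 'arg' is assumed to be the
     * per-lcore cloned graph for this worker core.
     */
    static int
    graph_worker_thread(void *arg)
    {
    	struct rte_graph *graph = arg;

    	/* Scheduler work queue is serviced first on every iteration,
    	 * then the local circular buffer, per the commit log.
    	 */
    	while (1)
    		rte_graph_walk_mcore_dispatch(graph);

    	return 0;
    }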
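
And, from memory of the rest of this series (so treat the names and
signatures below as assumptions, not part of this patch), the setup side
would look roughly like this: select the dispatch model, pin a node with
the affinity helper declared just above the new walk function, then clone
and bind one graph per worker core. setup_dispatch_model and the
"ethdev_tx-0" node name are placeholders for illustration:

    #include <rte_graph.h>
    #include <rte_graph_worker.h>

    static int
    setup_dispatch_model(rte_graph_t parent, unsigned int worker_lcore)
    {
    	struct rte_graph_param prm = { 0 };
    	rte_graph_t cloned;
    	int ret;

    	/* Select the mcore dispatch model before any walk starts. */
    	ret = rte_graph_worker_model_set(RTE_GRAPH_MODEL_MCORE_DISPATCH);
    	if (ret < 0)
    		return ret;

    	/* Pin a node to the worker core; only that core processes it.
    	 * "ethdev_tx-0" is an example node name, not from this patch.
    	 */
    	ret = rte_graph_model_mcore_dispatch_node_lcore_affinity_set("ethdev_tx-0",
    								     worker_lcore);
    	if (ret < 0)
    		return ret;

    	/* One cloned graph per worker core, bound to that core. */
    	cloned = rte_graph_clone(parent, "wkr0", &prm);
    	if (cloned == RTE_GRAPH_ID_INVALID)
    		return -1;

    	return rte_graph_model_mcore_dispatch_core_bind(cloned, worker_lcore);
    }

With that in place, nodes whose lcore_id differs from the walking graph's
are enqueued to the owning core's queue rather than processed locally,
which is exactly the enqueue branch in the hunk above.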