Add graph_mcore_dispatch_wq_node to hold the graph scheduling workqueue
node stream, and add the per-graph and per-node dispatch fields (bound
lcore, run-queue, work-queue ring and scheduling mempool) used by the
mcore dispatch model.

Signed-off-by: Haiyue Wang <haiyue.w...@intel.com>
Signed-off-by: Cunming Liang <cunming.li...@intel.com>
Signed-off-by: Zhirun Yan <zhirun....@intel.com>
---
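Notes (not for commit):
The structures added here only carry state; the helpers that actually
move a stream between cores come later in this series. Below is a rough
sketch, for review context only, of how these fields could be used. The
helper names dispatch_wq_push()/dispatch_wq_pop() are hypothetical, and
the sketch assumes node_off is the node's offset from the start of the
graph's memory reel: the sending side wraps a pending stream into a
graph_mcore_dispatch_wq_node taken from the destination graph's
dispatch.mp mempool and enqueues it on its dispatch.wq ring; the owning
lcore later pops the entry, locates the node via node_off and runs its
process function.

#include <errno.h>

#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_ring.h>

#include "graph_private.h"
#include "rte_graph_worker_common.h"

/* Hypothetical: hand a pending stream over to the graph clone owned by
 * another lcore. Entries come from the scheduling mempool and travel
 * through the destination graph's work-queue ring.
 */
static inline int
dispatch_wq_push(struct rte_graph *dst_graph, rte_graph_off_t node_off,
                 void **objs, uint16_t nb_objs)
{
        struct graph_mcore_dispatch_wq_node *wq_node;

        if (nb_objs > RTE_GRAPH_BURST_SIZE)
                return -EINVAL;
        if (rte_mempool_get(dst_graph->dispatch.mp, (void **)&wq_node) < 0)
                return -ENOMEM;

        wq_node->node_off = node_off;
        wq_node->nb_objs = nb_objs;
        rte_memcpy(wq_node->objs, objs, nb_objs * sizeof(void *));

        if (rte_ring_enqueue(dst_graph->dispatch.wq, wq_node) < 0) {
                rte_mempool_put(dst_graph->dispatch.mp, wq_node);
                return -ENOBUFS;
        }
        return 0;
}

/* Hypothetical: on the owning lcore, drain one pending stream from this
 * graph's work-queue and run the target node on it.
 */
static inline uint16_t
dispatch_wq_pop(struct rte_graph *graph)
{
        struct graph_mcore_dispatch_wq_node *wq_node;
        struct rte_node *node;
        uint16_t rc;

        if (rte_ring_dequeue(graph->dispatch.wq, (void **)&wq_node) < 0)
                return 0;

        /* node_off is assumed to locate the node inside the graph reel. */
        node = (struct rte_node *)RTE_PTR_ADD(graph, wq_node->node_off);
        rc = node->process(graph, node, wq_node->objs, wq_node->nb_objs);

        rte_mempool_put(graph->dispatch.mp, wq_node);
        return rc;
}

The real series may structure this differently (e.g. batching the ring
operations or deferring the process call to the graph walk loop); the
sketch only shows how node_off/nb_objs/objs relate to dispatch.wq and
dispatch.mp.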
 lib/graph/graph.c                   |  2 ++
 lib/graph/graph_populate.c          |  1 +
 lib/graph/graph_private.h           | 12 ++++++++++++
 lib/graph/rte_graph_worker_common.h | 29 +++++++++++++++++++++++++++++
 4 files changed, 44 insertions(+)

diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index 1b34f0e543..968cbbf86c 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -291,6 +291,7 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
                goto fail;
 
        graph->lcore_id = lcore;
+       graph->graph->dispatch.lcore_id = graph->lcore_id;
        graph->socket = rte_lcore_to_socket_id(lcore);
 
        /* check the availability of source node */
@@ -314,6 +315,7 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
                        break;
 
        graph->lcore_id = RTE_MAX_LCORE;
+       graph->graph->dispatch.lcore_id = RTE_MAX_LCORE;
 
 fail:
        return;
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index 2c0844ce92..ed596a7711 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -89,6 +89,7 @@ graph_nodes_populate(struct graph *_graph)
                }
                node->id = graph_node->node->id;
                node->parent_id = pid;
+               node->dispatch.lcore_id = graph_node->node->lcore_id;
                nb_edges = graph_node->node->nb_edges;
                node->nb_edges = nb_edges;
                off += sizeof(struct rte_node);
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index 354dc8ac0a..d84174b667 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -64,6 +64,18 @@ struct node {
        char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
 };
 
+/**
+ * @internal
+ *
+ * Structure that holds the graph scheduling workqueue node stream.
+ * Used for mcore dispatch model.
+ */
+struct graph_mcore_dispatch_wq_node {
+       rte_graph_off_t node_off;
+       uint16_t nb_objs;
+       void *objs[RTE_GRAPH_BURST_SIZE];
+} __rte_cache_aligned;
+
 /**
  * @internal
  *
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 3a32001e35..9e519b4d9d 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -36,12 +36,20 @@ extern "C" {
 /**< Dispatch model to support cross-core dispatching within core affinity. */
 #define RTE_GRAPH_MODEL_DEFAULT RTE_GRAPH_MODEL_RTC /**< Default graph model. */
 
+/**
+ * @internal
+ *
+ * Singly-linked list head for graph schedule run-queue.
+ */
+SLIST_HEAD(rte_graph_rq_head, rte_graph);
+
 /**
  * @internal
  *
  * Data structure to hold graph data.
  */
 struct rte_graph {
+       /* Fast path area. */
        uint32_t tail;               /**< Tail of circular buffer. */
        uint32_t head;               /**< Head of circular buffer. */
        uint32_t cir_mask;           /**< Circular buffer wrap around mask. */
@@ -51,6 +59,20 @@ struct rte_graph {
        uint8_t model;               /**< graph model */
        uint8_t reserved1;           /**< Reserved for future use. */
        uint16_t reserved2;          /**< Reserved for future use. */
+       RTE_STD_C11
+       union {
+               /* Fast schedule area for mcore dispatch model */
+               struct {
+                       struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
+                       struct rte_graph_rq_head rq_head; /* The head for run-queue list */
+
+                       unsigned int lcore_id;  /**< The graph running Lcore. */
+                       struct rte_ring *wq;    /**< The work-queue for pending streams. */
+                       struct rte_mempool *mp; /**< The mempool for scheduling streams. */
+               } dispatch; /** Only used by dispatch model */
+       };
+       SLIST_ENTRY(rte_graph) next;   /* The next for rte_graph list */
+       /* End of Fast path area.*/
        rte_graph_t id; /**< Graph identifier. */
        int socket;     /**< Socket ID where memory is allocated. */
        char name[RTE_GRAPH_NAMESIZE];  /**< Name of the graph. */
@@ -83,6 +105,13 @@ struct rte_node {
        /** Original process function when pcap is enabled. */
        rte_node_process_t original_process;
 
+       RTE_STD_C11
+       union {
+               /* Fast schedule area for mcore dispatch model */
+               struct {
+                       unsigned int lcore_id;  /**< Node running lcore. */
+               } dispatch;
+       };
        /* Fast path area  */
 #define RTE_NODE_CTX_SZ 16
        uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */
-- 
2.37.2
