Add graph_sched_wq_node to hold the graph scheduling workqueue node,
which carries a stream of objects between cores in the mcore dispatch
model.
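
For context, a minimal producer-side sketch of how the new fields are
meant to be used follows. The helper name dispatch_stream_to_wq, the
plain-pointer ring and the exact flow are assumptions for illustration
only; the real enqueue path is added later in this series.

/*
 * Illustrative only (needs rte_ring.h, rte_mempool.h, rte_memcpy.h and
 * graph_private.h): take a graph_sched_wq_node from the destination
 * graph's mempool, fill it with a burst of objects for 'node' and hand
 * it to the lcore running that graph via its work-queue ring.
 */
static __rte_always_inline int
dispatch_stream_to_wq(struct rte_graph *dst_graph, struct rte_node *node,
                      void **objs, uint16_t nb_objs)
{
        struct graph_sched_wq_node *wq_node;

        /* Take a free workqueue node from the scheduling mempool. */
        if (rte_mempool_get(dst_graph->mp, (void **)&wq_node) < 0)
                return -ENOENT;

        /* Record the owning node and copy the stream of objects. */
        wq_node->node_off = node->off;
        wq_node->nb_objs = RTE_MIN(nb_objs, RTE_GRAPH_BURST_SIZE);
        rte_memcpy(wq_node->objs, objs, wq_node->nb_objs * sizeof(void *));

        /* Hand the stream over to the graph bound to node->lcore_id. */
        if (rte_ring_enqueue(dst_graph->wq, wq_node) < 0) {
                rte_mempool_put(dst_graph->mp, wq_node);
                return -ENOSPC;
        }

        return 0;
}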

Signed-off-by: Haiyue Wang <haiyue.w...@intel.com>
Signed-off-by: Cunming Liang <cunming.li...@intel.com>
Signed-off-by: Zhirun Yan <zhirun....@intel.com>
---
 lib/graph/graph.c                   |  1 +
 lib/graph/graph_populate.c          |  1 +
 lib/graph/graph_private.h           | 12 ++++++++++++
 lib/graph/rte_graph_worker_common.h | 21 +++++++++++++++++++++
 4 files changed, 35 insertions(+)

diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index 90eaad0378..dd3d69dbf7 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -284,6 +284,7 @@ rte_graph_model_dispatch_core_bind(rte_graph_t id, int lcore)
                        break;
 
        graph->lcore_id = lcore;
+       graph->graph->lcore_id = graph->lcore_id;
        graph->socket = rte_lcore_to_socket_id(lcore);
 
        /* check the availability of source node */
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index 2c0844ce92..7dcf1420c1 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -89,6 +89,7 @@ graph_nodes_populate(struct graph *_graph)
                }
                node->id = graph_node->node->id;
                node->parent_id = pid;
+               node->lcore_id = graph_node->node->lcore_id;
                nb_edges = graph_node->node->nb_edges;
                node->nb_edges = nb_edges;
                off += sizeof(struct rte_node);
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index d28a5af93e..b66b18ebbc 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -60,6 +60,18 @@ struct node {
        char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
 };
 
+/**
+ * @internal
+ *
+ * Structure that holds the graph scheduling workqueue node stream.
+ * Used for mcore dispatch model.
+ */
+struct graph_sched_wq_node {
+       rte_graph_off_t node_off;
+       uint16_t nb_objs;
+       void *objs[RTE_GRAPH_BURST_SIZE];
+} __rte_cache_aligned;
+
 /**
  * @internal
  *
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 64d777bd5f..70cfde7015 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -29,6 +29,13 @@
 extern "C" {
 #endif
 
+/**
+ * @internal
+ *
+ * Singly-linked list head for graph schedule run-queue.
+ */
+SLIST_HEAD(rte_graph_rq_head, rte_graph);
+
 /**
  * @internal
  *
@@ -40,6 +47,15 @@ struct rte_graph {
        uint32_t cir_mask;           /**< Circular buffer wrap around mask. */
        rte_node_t nb_nodes;         /**< Number of nodes in the graph. */
        rte_graph_off_t *cir_start;  /**< Pointer to circular buffer. */
+       /* Graph schedule */
+       struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
+       struct rte_graph_rq_head rq_head; /* The head for run-queue list */
+
+       SLIST_ENTRY(rte_graph) rq_next;   /* The next for run-queue list */
+       unsigned int lcore_id;  /**< The graph running Lcore. */
+       struct rte_ring *wq;    /**< The work-queue for pending streams. */
+       struct rte_mempool *mp; /**< The mempool for scheduling streams. */
+       /* Graph schedule area */
        rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
        rte_graph_t id; /**< Graph identifier. */
        int socket;     /**< Socket ID where memory is allocated. */
@@ -73,6 +89,11 @@ struct rte_node {
        /** Original process function when pcap is enabled. */
        rte_node_process_t original_process;
 
+       RTE_STD_C11
+       union {
+               /* Fast schedule area for mcore dispatch model */
+               unsigned int lcore_id;  /**< Node running lcore. */
+       };
        /* Fast path area  */
 #define RTE_NODE_CTX_SZ 16
        uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */
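
As a further note for reviewers, here is a minimal sketch of how the new
run-queue fields could be linked once graph clones are bound to worker
lcores. The helper name graph_rq_attach and the ownership model (the
parent graph owning the list head) are assumptions; the real run-queue
management arrives later in this series.

/*
 * Illustrative only (SLIST macros come from sys/queue.h): the parent
 * graph owns rq_head, every graph in the set points at it through rq,
 * and each graph chains itself into the list via rq_next so a worker
 * can walk its peers.
 */
static inline void
graph_rq_attach(struct rte_graph *parent, struct rte_graph *clone)
{
        if (parent->rq == NULL) {
                SLIST_INIT(&parent->rq_head);
                parent->rq = &parent->rq_head;
                SLIST_INSERT_HEAD(parent->rq, parent, rq_next);
        }

        clone->rq = parent->rq;
        SLIST_INSERT_HEAD(clone->rq, clone, rq_next);
}
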
-- 
2.37.2
