The members "dispatch" and "xstat_off" of the structure "rte_node" can be aligned to the minimum cache line size (RTE_CACHE_LINE_MIN_SIZE) to make room for future expansion and to ensure better performance. Add corresponding comments.
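For reference, below is a minimal standalone sketch (not part of this patch) of how alignas(RTE_CACHE_LINE_MIN_SIZE) starts a member on its own minimum cache line, leaving padding behind it for future members. The names "demo_node" and the local 64-byte CACHE_LINE_MIN_SIZE macro are illustrative stand-ins for rte_node and RTE_CACHE_LINE_MIN_SIZE; the layout is simplified (no anonymous union around dispatch).

/* Minimal sketch, not part of the patch: "demo_node" is a simplified
 * stand-in for rte_node, used only to show the effect of alignas()
 * on member offsets. Build with a C11 compiler.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_MIN_SIZE 64 /* stand-in for RTE_CACHE_LINE_MIN_SIZE */

struct demo_node {
	uint64_t slow_path_data; /* slow path members */

	/* Starts on its own minimum cache line; the rest of that line is
	 * padding that future dispatch members can occupy without shifting
	 * the offsets of the members that follow.
	 */
	alignas(CACHE_LINE_MIN_SIZE) struct {
		unsigned int lcore_id;
		uint64_t total_sched_objs;
		uint64_t total_sched_fail;
	} dispatch;

	/* Likewise placed at the start of the next minimum cache line. */
	alignas(CACHE_LINE_MIN_SIZE) uint32_t xstat_off;
};

int main(void)
{
	/* Both members land on minimum-cache-line boundaries. */
	printf("dispatch offset:  %zu\n", offsetof(struct demo_node, dispatch));
	printf("xstat_off offset: %zu\n", offsetof(struct demo_node, xstat_off));
	return 0;
}

On a 64-byte minimum cache line, both printed offsets are multiples of 64, and the padding after each aligned member is where later additions can go without moving the members that follow, which is the "room for future expansion" mentioned above.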
Signed-off-by: Huichao Cai <chcch...@163.com>
---
 doc/guides/rel_notes/release_24_11.rst |  2 ++
 lib/graph/rte_graph_worker_common.h    | 10 +++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
index 5063badf39..32800e8cb0 100644
--- a/doc/guides/rel_notes/release_24_11.rst
+++ b/doc/guides/rel_notes/release_24_11.rst
@@ -491,6 +491,8 @@ ABI Changes
   added new structure ``rte_node_xstats`` to ``rte_node_register`` and
   added ``xstat_off`` to ``rte_node``.
 
+* graph: The members ``dispatch`` and ``xstat_off`` of the structure ``rte_node`` have been
+  marked as RTE_CACHE_LINE_MIN_SIZE bytes aligned.
 
 Known Issues
 ------------
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index a518af2b2a..d3ec88519d 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -104,16 +104,20 @@ struct __rte_cache_aligned rte_node {
 	/** Original process function when pcap is enabled. */
 	rte_node_process_t original_process;
 
+	/** Fast schedule area for mcore dispatch model. */
 	union {
-		/* Fast schedule area for mcore dispatch model */
-		struct {
+		alignas(RTE_CACHE_LINE_MIN_SIZE) struct {
 			unsigned int lcore_id;  /**< Node running lcore. */
 			uint64_t total_sched_objs; /**< Number of objects scheduled. */
 			uint64_t total_sched_fail; /**< Number of scheduled failure. */
 		} dispatch;
 	};
+
+	/** Fast path area cache line 1. */
+	alignas(RTE_CACHE_LINE_MIN_SIZE) rte_graph_off_t xstat_off; /**< Offset to xstat counters. */
 
-	/* Fast path area */
+
+	/** Fast path area cache line 2. */
 	__extension__ struct __rte_cache_aligned {
 #define RTE_NODE_CTX_SZ 16
 		union {
-- 
2.27.0