The l2_len and l3_len fields are only used for TX offloads, so move them
to the second cache line alongside the other TX-only fields.
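
A minimal sketch (not part of the patch) of how the new placement can be
checked at build time, assuming a C11 compiler, a DPDK build environment
and a 64-byte cache line; the 64-byte value and the check itself are
illustrative assumptions, not something this patch adds:

  #include <stddef.h>
  #include <rte_mbuf.h>

  /* 64 is an assumption; the real cache line size is platform dependent. */
  _Static_assert(offsetof(struct rte_mbuf, l2_l3_len) >= 64,
                 "TX offload lengths expected on the second cache line");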

Updates in V2:
* The l2 and l3 lengths can be accessed as a single uint16_t for
  performance, as well as individually.
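
  As an illustrative sketch only (the helper names are hypothetical and
  not part of the patch), on a little-endian target the combined field
  lets both header lengths be written with a single 16-bit store instead
  of two bit-field read-modify-write updates:

    #include <rte_mbuf.h>

    static inline void
    set_tx_hdr_lens(struct rte_mbuf *m, uint16_t l2, uint16_t l3)
    {
            /* l3_len occupies the low 9 bits, l2_len the top 7 bits
             * (little-endian bit-field layout assumed). */
            m->l2_l3_len = (uint16_t)((l2 << 9) | l3);
    }

    static inline void
    set_tx_hdr_lens_bitfields(struct rte_mbuf *m, uint16_t l2, uint16_t l3)
    {
            /* Equivalent result, but typically two separate updates. */
            m->l2_len = l2;
            m->l3_len = l3;
    }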

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
---
 lib/librte_mbuf/rte_mbuf.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 508021b..1c6e115 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -159,13 +159,7 @@ struct rte_mbuf {
        uint16_t reserved2;       /**< Unused field. Required for padding */
        uint16_t data_len;        /**< Amount of data in segment buffer. */
        uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
-       union {
-               uint16_t l2_l3_len; /**< combined l2/l3 lengths as single var */
-               struct {
-                       uint16_t l3_len:9;      /**< L3 (IP) Header Length. */
-                       uint16_t l2_len:7;      /**< L2 (MAC) Header Length. */
-               };
-       };
+       uint16_t reserved;
        uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
        union {
                uint32_t rss;     /**< RSS hash result if RSS enabled */
@@ -181,6 +175,14 @@ struct rte_mbuf {
        struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
        struct rte_mbuf *next;    /**< Next segment of scattered packet. */

+       /* fields to support TX offloads */
+       union {
+               uint16_t l2_l3_len; /**< combined l2/l3 lengths as single var */
+               struct {
+                       uint16_t l3_len:9;      /**< L3 (IP) Header Length. */
+                       uint16_t l2_len:7;      /**< L2 (MAC) Header Length. */
+               };
+       };
 } __rte_cache_aligned;

 /**
-- 
1.9.3
