This change splits the mbuf in two to move the pool and next pointers to
the second cache line. This frees up 16 bytes in the first cache line.

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
---
 app/test/test_mbuf.c       | 2 +-
 lib/librte_mbuf/rte_mbuf.h | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 31ef9fa..65d62dc 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -782,7 +782,7 @@ test_failing_mbuf_sanity_check(void)
 int
 test_mbuf(void)
 {
-       RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != 64);
+       RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != CACHE_LINE_SIZE * 2);

        /* create pktmbuf pool if it does not exist */
        if (pktmbuf_pool == NULL) {
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 566bb7e..a3e3e4f 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -137,6 +137,9 @@ union rte_vlan_macip {
  * The generic rte_mbuf, containing a packet mbuf.
  */
 struct rte_mbuf {
+       /** dummy field marking start of first cache line */
+       void *cache_line0[0];
+
        void *buf_addr;           /**< Virtual address of segment buffer. */
        phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */

@@ -183,6 +186,8 @@ struct rte_mbuf {
        } hash;                 /**< hash information */

        /* second cache line, fields only used in slow path or on TX */
+       /** dummy field marking start of second cache line */
+       void *cache_line1[0] __rte_cache_aligned;
        struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
        struct rte_mbuf *next;    /**< Next segment of scattered packet. */

-- 
1.9.3

Reply via email to