This change splits the mbuf across two cache lines, moving the pool and
next pointers to the second cache line. On 64-bit systems this frees up
16 bytes (two 8-byte pointers) in the first cache line.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
app/test/test_mbuf.c | 2 +-
lib/librte_mbuf/rte_mbuf.h | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
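
[Note for reviewers, not part of the commit message: the pattern used by
this patch is a zero-size marker member forced onto a cache-line boundary,
so every field declared after it starts in the second cache line. Below is
a minimal standalone sketch of the idea, assuming a 64-byte cache line and
GNU C (zero-length arrays and aligned attributes); struct example_buf and
its fields are illustrative stand-ins, not the real rte_mbuf layout.]

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64

/* zero-length array (GNU C extension): occupies no space,
 * it only marks an offset within the structure */
typedef uint8_t MARKER[0];

struct example_buf {
	/* hot fields, touched on every packet (first cache line) */
	void *buf_addr;
	uint16_t buf_len;
	uint16_t data_len;

	/* force the next field onto a fresh cache-line boundary */
	MARKER cacheline1 __attribute__((aligned(CACHE_LINE_SIZE)));

	/* cold fields, only used on alloc/free or TX (second line) */
	void *pool;
	void *next;
};

/* the marker sits exactly at the start of the second cache line, and
 * the aligned member also raises the struct's own alignment to 64,
 * so the whole struct is padded to two cache lines */
_Static_assert(offsetof(struct example_buf, cacheline1) == CACHE_LINE_SIZE,
	       "second cache line must start at byte 64");
_Static_assert(sizeof(struct example_buf) == 2 * CACHE_LINE_SIZE,
	       "struct should occupy exactly two cache lines");
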
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index e3d896b..82136de 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -782,7 +782,7 @@ test_failing_mbuf_sanity_check(void)
 static int
 test_mbuf(void)
 {
-	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != 64);
+	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != CACHE_LINE_SIZE * 2);
 
 	/* create pktmbuf pool if it does not exist */
 	if (pktmbuf_pool == NULL) {
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 591be95..db079ac 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -171,7 +171,8 @@ struct rte_mbuf {
 		uint32_t sched;     /**< Hierarchical scheduler */
 	} hash;                 /**< hash information */
 
-	/* fields only used in slow path or on TX */
+	/* second cache line - fields only used in slow path or on TX */
+	MARKER cacheline1 __rte_cache_aligned;
 	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
 	struct rte_mbuf *next;    /**< Next segment of scattered packet. */
 
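
[Reviewer note, not part of the patch: with the cold fields on a second
cache line, RX paths that never touch pool/next stay within one cache line
per mbuf, while paths that do need them can prefetch the second line
explicitly via the new marker. A hedged sketch of that usage, assuming
this patch is applied and using DPDK's existing rte_prefetch0();
rx_burst_process() is a made-up driver helper, not a real API.]

#include <rte_mbuf.h>
#include <rte_prefetch.h>

static void
rx_burst_process(struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		struct rte_mbuf *m = pkts[i];

		/* only pull in the second cache line when the slow-path
		 * fields (pool, next) are actually about to be used */
		rte_prefetch0(&m->cacheline1);

		/* ... segment walking / free logic touching m->next ... */
	}
}
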
--
1.9.3