Added cache guard after the table holding the ring elements, to avoid
false sharing conflicts caused by next-line hardware prefetchers when
accessing elements at the end of the ring table.

Signed-off-by: Morten Brørup <[email protected]>
---
 lib/ring/rte_ring.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lib/ring/rte_ring.c b/lib/ring/rte_ring.c
index f10050a1c4..9ccc62cd42 100644
--- a/lib/ring/rte_ring.c
+++ b/lib/ring/rte_ring.c
@@ -73,8 +73,11 @@ rte_ring_get_memsize_elem(unsigned int esize, unsigned int count)
                return -EINVAL;
        }
 
+       static_assert(sizeof(struct rte_ring) == RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_ring)),
+                       "Size of struct rte_ring not cache line aligned");
        sz = sizeof(struct rte_ring) + (ssize_t)count * esize;
        sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+       sz += RTE_CACHE_GUARD_LINES * RTE_CACHE_LINE_SIZE;
        return sz;
 }
 
-- 
2.43.0

Reply via email to