If the page size is large (like 64K on ARM) and the object size is
small, don't waste lots of memory by rounding up to the page size.
Instead, round up so that one or more whole objects fit in a page.
This preserves the requirement that an object must not cross a page
boundary (or virt2phys would break), and avoids wasting ~62K per mbuf.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
lib/librte_mempool/rte_mempool.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
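
Not part of the patch, just a worked example of the new arithmetic
with made-up sizes (the local helper mirrors rte_align32pow2(), which
rounds up to the next power of two):

#include <stdio.h>
#include <stdint.h>

/* Same round-up-to-next-power-of-2 as rte_align32pow2(). */
static uint32_t align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int main(void)
{
	uint32_t page_size = 65536;		/* 64K page, e.g. ARM */
	uint32_t orig_size = 128 + 2048;	/* header + element (made up) */
	uint32_t new_size = align32pow2(orig_size);	/* -> 4096 */
	uint32_t trailer = (new_size - orig_size) / (page_size / new_size);

	/* Old code: trailer = page_size - orig_size = 63360, so each
	 * ~2K object occupied a whole 64K page. New code: 120, i.e.
	 * the 1920 bytes of slack divided over 16 slots per page. */
	printf("new_size=%u slots/page=%u trailer=%u\n",
	       new_size, page_size / new_size, trailer);
	return 0;
}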
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f8781e1..8fa855a 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -300,18 +300,24 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	if (! rte_eal_has_hugepages()) {
 		/*
 		 * compute trailer size so that pool elements fit exactly in
-		 * a standard page
+		 * a standard page. If elements are smaller than a page
+		 * then allow multiple elements per page
 		 */
-		int page_size = getpagesize();
-		int new_size = page_size - sz->header_size - sz->elt_size;
-		if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
+		unsigned page_size = getpagesize();
+		uint32_t orig_size, new_size;
+
+		orig_size = sz->header_size + sz->elt_size;
+		new_size = rte_align32pow2(orig_size);
+		if (new_size > page_size) {
 			printf("When hugepages are disabled, pool objects "
 			       "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
 			       sz->header_size, sz->elt_size, sz->trailer_size,
 			       page_size);
 			return 0;
 		}
-		sz->trailer_size = new_size;
+
+		sz->trailer_size = (new_size - orig_size)
+			/ (page_size / new_size);
 	}
 
 	/* this is the size of an object, including header and trailer */
--
2.1.4
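
P.S. For anyone who wants to see the effect, a minimal caller sketch;
the 2048-byte element size is made up, and it assumes EAL is started
without hugepages (e.g. --no-huge) so this code path is taken:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_mempool.h>

int main(int argc, char **argv)
{
	struct rte_mempool_objsz sz;
	uint32_t total;

	/* Run with --no-huge so rte_eal_has_hugepages() is false. */
	if (rte_eal_init(argc, argv) < 0)
		return 1;

	total = rte_mempool_calc_obj_size(2048, 0, &sz);
	printf("header=%u elt=%u trailer=%u total=%u\n",
	       sz.header_size, sz.elt_size, sz.trailer_size, total);
	return 0;
}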