From: Matias Elo <matias....@nokia.com>

Allocating enough memory in each RX/TX block to store every packet in the
packet pool wastes a lot of memory. In addition, the subsequent mmap() call
in mmap_sock() can start failing when the number of memory blocks grows
(e.g. with a large number of CPU cores). Reduce the amount of required
memory by limiting the size of the memory blocks.
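
For illustration, the capped, page-aligned block size math works roughly as
in the minimal standalone sketch below (the frame size and pool size are
hypothetical example values, not taken from this patch):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define MAX_PKTS_PER_BLOCK 512

int main(void)
{
	int pz = getpagesize();       /* e.g. 4096 */
	uint32_t frame_size = 2048;   /* hypothetical tp_frame_size */
	uint32_t pool_num   = 8192;   /* hypothetical packet pool size */
	uint32_t num_frames, block_size;

	/* Cap the number of frames per block instead of sizing the
	 * block for the whole pool. */
	num_frames = pool_num < MAX_PKTS_PER_BLOCK ? pool_num :
			MAX_PKTS_PER_BLOCK;

	/* Round the block size up to the next page boundary. */
	block_size = (frame_size * num_frames + (pz - 1)) & (-pz);

	printf("block size: %u bytes (%u frames, page size %d)\n",
	       block_size, num_frames, pz);
	return 0;
}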

Signed-off-by: Matias Elo <matias....@nokia.com>
---
/** Email created from pull request 397 (matiaselo:dev/pktio_fixes)
 ** https://github.com/Linaro/odp/pull/397
 ** Patch: https://github.com/Linaro/odp/pull/397.patch
 ** Base sha: 520c170d758f2d37554631bf1467ec50e027cd3e
 ** Merge commit sha: 926e7716387610a8be67fbc051845450640f8df6
 **/
 platform/linux-generic/pktio/socket_mmap.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index c63a25398..13707e8f2 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -37,6 +37,9 @@
 #include <protocols/eth.h>
 #include <protocols/ip.h>
 
+/* Maximum number of packets to store in each RX/TX block */
+#define MAX_PKTS_PER_BLOCK 512
+
 static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
 
 static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
@@ -348,6 +351,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 
 static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 {
+       uint32_t num_frames;
        int pz = getpagesize();
        pool_t *pool;
 
@@ -361,11 +365,13 @@ static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
                                   pool->tailroom + TPACKET_HDRLEN +
                                   TPACKET_ALIGNMENT + + (pz - 1)) & (-pz);
 
-       /* Calculate how many pages do we need to hold all pool packets
-       *  and align size to page boundary.
-       */
-       ring->req.tp_block_size = (ring->req.tp_frame_size *
-                                  pool->num + (pz - 1)) & (-pz);
+       /* Calculate how many pages we need to hold at most MAX_PKTS_PER_BLOCK
+        * packets and align size to page boundary.
+        */
+       num_frames = pool->num < MAX_PKTS_PER_BLOCK ? pool->num :
+                       MAX_PKTS_PER_BLOCK;
+       ring->req.tp_block_size = (ring->req.tp_frame_size * num_frames +
+                                  (pz - 1)) & (-pz);
 
        if (!fanout) {
                /* Single socket is in use. Use 1 block with buf_num frames. */
