Add a new flag to remove the constraint of having physically contiguous
objects inside a mempool.

Add this flag to the log history mempool to start, but we could add
it in most cases where objects are not mbufs.

Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
---
 lib/librte_eal/common/eal_common_log.c |  2 +-
 lib/librte_mempool/rte_mempool.c       | 23 ++++++++++++++++++++---
 lib/librte_mempool/rte_mempool.h       |  5 +++++
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index 1ae8de7..9122b34 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -322,7 +322,7 @@ rte_eal_common_log_init(FILE *default_log)
                                LOG_ELT_SIZE, 0, 0,
                                NULL, NULL,
                                NULL, NULL,
-                               SOCKET_ID_ANY, 0);
+                               SOCKET_ID_ANY, MEMPOOL_F_NO_PHYS_CONTIG);

        if ((log_history_mp == NULL) &&
            ((log_history_mp = rte_mempool_lookup(LOG_HISTORY_MP_NAME)) == NULL)){
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1f998ef..7d4cabe 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -410,7 +410,11 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,

        while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
                off += mp->header_size;
-               mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
+               if (paddr == RTE_BAD_PHYS_ADDR)
+                       mempool_add_elem(mp, (char *)vaddr + off,
+                               RTE_BAD_PHYS_ADDR);
+               else
+                       mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
                off += mp->elt_size + mp->trailer_size;
                i++;
        }
@@ -439,6 +443,10 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
        if (mp->nb_mem_chunks != 0)
                return -EEXIST;

+       if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+               return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+                       pg_num * pg_sz, free_cb, opaque);
+
        for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {

                /* populate with the largest group of contiguous pages */
@@ -479,6 +487,10 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
        if (RTE_ALIGN_CEIL(len, pg_sz) != len)
                return -EINVAL;

+       if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+               return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+                       len, free_cb, opaque);
+
        for (off = 0; off + pg_sz <= len &&
                     mp->populated_size < mp->size; off += phys_len) {

@@ -528,6 +540,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        size_t size, total_elt_sz, align, pg_sz, pg_shift;
+       phys_addr_t paddr;
        unsigned mz_id, n;
        int ret;

@@ -567,10 +580,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
                        goto fail;
                }

-               /* use memzone physical address if it is valid */
+               if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+                       paddr = RTE_BAD_PHYS_ADDR;
+               else
+                       paddr = mz->phys_addr;
+
                if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
                        ret = rte_mempool_populate_phys(mp, mz->addr,
-                               mz->phys_addr, mz->len,
+                               paddr, mz->len,
                                rte_mempool_memchunk_mz_free,
                                (void *)(uintptr_t)mz);
                else
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index fe4e6fd..e6a257f 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -235,6 +235,7 @@ struct rte_mempool {
 #define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
 #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
 #define MEMPOOL_F_RING_CREATED   0x0010 /**< Internal: ring is created */
+#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */

 /**
  * @internal When debug is enabled, store some statistics.
@@ -417,6 +418,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
  *     when using rte_mempool_get() or rte_mempool_get_bulk() is
  *     "single-consumer". Otherwise, it is "multi-consumers".
+ *   - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
+ *     necessarily be contiguous in physical memory.
  * @return
  *   The pointer to the new allocated mempool, on success. NULL on error
  *   with rte_errno set appropriately. Possible rte_errno values include:
@@ -1222,6 +1225,8 @@ rte_mempool_empty(const struct rte_mempool *mp)
  *   A pointer (virtual address) to the element of the pool.
  * @return
  *   The physical address of the elt element.
+ *   If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
+ *   returned value is RTE_BAD_PHYS_ADDR.
  */
 static inline phys_addr_t
rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
-- 
2.1.4

Reply via email to