Introduce a free callback that is passed to the populate* functions,
which is used when freeing a mempool. This is unused now, but as next
commits will populate the mempool with several chunks of memory, we
need a way to free them properly on error.

Later in the series, we will also introduce a public rte_mempool_free()
and the ability for the user to populate a mempool with its own memory.
For that, we also need a free callback.

Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
---
 lib/librte_mempool/rte_mempool.c | 27 ++++++++++++++++++++++-----
 lib/librte_mempool/rte_mempool.h |  8 ++++++++
 2 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f2f7846..0ae899b 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -388,6 +388,15 @@ rte_mempool_ring_create(struct rte_mempool *mp)
        return 0;
 }

+/* free a memchunk allocated with rte_memzone_reserve() */
+__rte_unused static void
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+       void *opaque)
+{
+       const struct rte_memzone *mz = opaque;
+       rte_memzone_free(mz);
+}
+
 /* Free memory chunks used by a mempool. Objects must be in pool */
 static void
 rte_mempool_free_memchunks(struct rte_mempool *mp)
@@ -405,6 +414,8 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
        while (!STAILQ_EMPTY(&mp->mem_list)) {
                memhdr = STAILQ_FIRST(&mp->mem_list);
                STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+               if (memhdr->free_cb != NULL)
+                       memhdr->free_cb(memhdr, memhdr->opaque);
                rte_free(memhdr);
                mp->nb_mem_chunks--;
        }
@@ -415,7 +426,8 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
  * on error. */
 static int
 rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
-       phys_addr_t paddr, size_t len)
+       phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+       void *opaque)
 {
        unsigned total_elt_sz;
        unsigned i = 0;
@@ -436,6 +448,8 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
        memhdr->addr = vaddr;
        memhdr->phys_addr = paddr;
        memhdr->len = len;
+       memhdr->free_cb = free_cb;
+       memhdr->opaque = opaque;

        if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
                off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
@@ -462,7 +476,8 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * number of objects added, or a negative value on error. */
 static int
 rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
-       const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+       const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+       rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
 {
        uint32_t i, n;
        int ret, cnt = 0;
@@ -480,11 +495,13 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
                        ;

                ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
-                       paddr[i], n * pg_sz);
+                       paddr[i], n * pg_sz, free_cb, opaque);
                if (ret < 0) {
                        rte_mempool_free_memchunks(mp);
                        return ret;
                }
+               /* no need to call the free callback for next chunks */
+               free_cb = NULL;
                cnt += ret;
        }
        return cnt;
@@ -666,12 +683,12 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,

                ret = rte_mempool_populate_phys(mp, obj,
                        mp->phys_addr + ((char *)obj - (char *)mp),
-                       objsz.total_size * n);
+                       objsz.total_size * n, NULL, NULL);
                if (ret != (int)mp->size)
                        goto exit_unlock;
        } else {
                ret = rte_mempool_populate_phys_tab(mp, vaddr,
-                       paddr, pg_num, pg_shift);
+                       paddr, pg_num, pg_shift, NULL, NULL);
                if (ret != (int)mp->size)
                        goto exit_unlock;
        }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0e4641e..e06ccfc 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -187,6 +187,12 @@ struct rte_mempool_objtlr {
 STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

 /**
+ * Callback used to free a memory chunk
+ */
+typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
+       void *opaque);
+
+/**
  * Mempool objects memory header structure
  *
  * The memory chunks where objects are stored. Each chunk is virtually
@@ -198,6 +204,8 @@ struct rte_mempool_memhdr {
        void *addr;              /**< Virtual address of the chunk */
        phys_addr_t phys_addr;   /**< Physical address of the chunk */
        size_t len;              /**< length of the chunk */
+       rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
+       void *opaque;            /**< Argument passed to the free callback */
 };

 /**
-- 
2.1.4

Reply via email to