Pass the same flags as in rte_mempool_create() to rte_mempool_generic_put() and
rte_mempool_generic_get(), replacing the is_mp/is_mc arguments. The
single-producer and single-consumer paths are now selected with the
MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET bits.

Signed-off-by: Lazaros Koromilas <l@nofutznetworks.com>
---
 app/test/test_mempool.c          | 18 +++++------
 app/test/test_mempool_perf.c     |  4 +--
 lib/librte_mempool/rte_mempool.h | 66 +++++++++++++++++++++-------------------
 3 files changed, 45 insertions(+), 43 deletions(-)
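
For reviewers, a minimal caller-side sketch of the reworked API (not part of
the patch itself). It assumes a pool already created with rte_mempool_create()
and an EAL lcore context; the helper name use_generic_api() is hypothetical:

    #include <rte_lcore.h>
    #include <rte_mempool.h>

    static int
    use_generic_api(struct rte_mempool *mp)
    {
            struct rte_mempool_cache *cache;
            void *obj;

            /* Per-lcore default cache; may be NULL if caching is disabled. */
            cache = rte_mempool_default_cache(mp, rte_lcore_id());

            /* Pass the pool's own creation flags: MEMPOOL_F_SC_GET selects
             * the single-consumer dequeue path, otherwise multi-consumer. */
            if (rte_mempool_generic_get(mp, &obj, 1, cache, mp->flags) < 0)
                    return -1;

            /* Passing 0 forces the multi-producer enqueue path, as the
             * updated unit tests below do. */
            rte_mempool_generic_put(mp, &obj, 1, cache, 0);
            return 0;
    }
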

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 2dc0cf2..445f450 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -121,7 +121,7 @@ test_mempool_basic(void)
        rte_mempool_dump(stdout, mp);

        printf("get an object\n");
-       if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
+       if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
                return -1;
        rte_mempool_dump(stdout, mp);

@@ -149,21 +149,21 @@ test_mempool_basic(void)
                return -1;

        printf("put the object back\n");
-       rte_mempool_generic_put(mp, &obj, 1, cache, 1);
+       rte_mempool_generic_put(mp, &obj, 1, cache, 0);
        rte_mempool_dump(stdout, mp);

        printf("get 2 objects\n");
-       if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
+       if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
                return -1;
-       if (rte_mempool_generic_get(mp, &obj2, 1, cache, 1) < 0) {
-               rte_mempool_generic_put(mp, &obj, 1, cache, 1);
+       if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
+               rte_mempool_generic_put(mp, &obj, 1, cache, 0);
                return -1;
        }
        rte_mempool_dump(stdout, mp);

        printf("put the objects back\n");
-       rte_mempool_generic_put(mp, &obj, 1, cache, 1);
-       rte_mempool_generic_put(mp, &obj2, 1, cache, 1);
+       rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+       rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
        rte_mempool_dump(stdout, mp);

        /*
@@ -176,7 +176,7 @@ test_mempool_basic(void)
        }

        for (i=0; i<MEMPOOL_SIZE; i++) {
-               if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 1) < 0)
+               if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
                        break;
        }

@@ -198,7 +198,7 @@ test_mempool_basic(void)
                                ret = -1;
                }

-               rte_mempool_generic_put(mp, &objtable[i], 1, cache, 1);
+               rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
        }

        free(objtable);
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index e917f4d..fa7e817 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -172,7 +172,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
                        while (idx < n_keep) {
                                ret = rte_mempool_generic_get(mp, &obj_table[idx],
                                                              n_get_bulk,
-                                                             cache, 1);
+                                                             cache, 0);
                                if (unlikely(ret < 0)) {
                                        rte_mempool_dump(stdout, mp);
                                        rte_ring_dump(stdout, mp->ring);
@@ -187,7 +187,7 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
                        while (idx < n_keep) {
                                rte_mempool_generic_put(mp, &obj_table[idx],
                                                        n_put_bulk,
-                                                       cache, 1);
+                                                       cache, 0);
                                idx += n_put_bulk;
                        }
                }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 21d43e2..fe4fed9 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -812,12 +812,13 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
  *   positive.
  * @param cache
  *   A pointer to a mempool cache structure. May be NULL if not needed.
- * @param is_mp
- *   Mono-producer (0) or multi-producers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
 static inline void __attribute__((always_inline))
 __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-                     unsigned n, struct rte_mempool_cache *cache, int is_mp)
+                     unsigned n, struct rte_mempool_cache *cache, int flags)
 {
        uint32_t index;
        void **cache_objs;
@@ -826,7 +827,8 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
        __MEMPOOL_STAT_ADD(mp, put, n);

        /* No cache provided or cache is not enabled or single producer */
-       if (unlikely(cache == NULL || cache->size == 0 || is_mp == 0))
+       if (unlikely(cache == NULL || cache->size == 0 ||
+                    flags & MEMPOOL_F_SP_PUT))
                goto ring_enqueue;

        /* Go straight to ring if put would overflow mem allocated for cache */
@@ -860,19 +862,18 @@ ring_enqueue:

        /* push remaining objects in ring */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-       if (is_mp) {
-               if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
-                       rte_panic("cannot put objects in mempool\n");
-       }
-       else {
+       if (flags & MEMPOOL_F_SP_PUT) {
                if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
                        rte_panic("cannot put objects in mempool\n");
+       } else {
+               if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+                       rte_panic("cannot put objects in mempool\n");
        }
 #else
-       if (is_mp)
-               rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
-       else
+       if (flags & MEMPOOL_F_SP_PUT)
                rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+       else
+               rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
 #endif
 }

@@ -888,15 +889,16 @@ ring_enqueue:
  *   positive.
  * @param cache
  *   A pointer to a mempool cache structure. May be NULL if not needed.
- * @param is_mp
- *   Mono-producer (0) or multi-producers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
 static inline void __attribute__((always_inline))
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-                       unsigned n, struct rte_mempool_cache *cache, int is_mp)
+                       unsigned n, struct rte_mempool_cache *cache, int flags)
 {
        __mempool_check_cookies(mp, obj_table, n, 0);
-       __mempool_generic_put(mp, obj_table, n, cache, is_mp);
+       __mempool_generic_put(mp, obj_table, n, cache, flags);
 }

 /**
@@ -920,8 +922,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
        struct rte_mempool_cache *cache;

        cache = rte_mempool_default_cache(mp, rte_lcore_id());
-       rte_mempool_generic_put(mp, obj_table, n, cache,
-                               !(mp->flags & MEMPOOL_F_SP_PUT));
+       rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags);
 }

 /**
@@ -952,23 +953,24 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   The number of objects to get, must be strictly positive.
  * @param cache
  *   A pointer to a mempool cache structure. May be NULL if not needed.
- * @param is_mc
- *   Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
  * @return
  *   - >=0: Success; number of objects supplied.
  *   - <0: Error; code of ring dequeue function.
  */
 static inline int __attribute__((always_inline))
 __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
-                     unsigned n, struct rte_mempool_cache *cache, int is_mc)
+                     unsigned n, struct rte_mempool_cache *cache, int flags)
 {
        int ret;
        uint32_t index, len;
        void **cache_objs;

        /* No cache provided or cache is not enabled or single consumer */
-       if (unlikely(cache == NULL || cache->size == 0 || is_mc == 0 ||
-                    n >= cache->size))
+       if (unlikely(cache == NULL || cache->size == 0 ||
+                    flags & MEMPOOL_F_SC_GET || n >= cache->size))
                goto ring_dequeue;

        cache_objs = cache->objs;
@@ -1006,10 +1008,10 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 ring_dequeue:

        /* get remaining objects from ring */
-       if (is_mc)
-               ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
-       else
+       if (flags & MEMPOOL_F_SC_GET)
                ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+       else
+               ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);

        if (ret < 0)
                __MEMPOOL_STAT_ADD(mp, get_fail, n);
@@ -1030,18 +1032,19 @@ ring_dequeue:
  *   The number of objects to get, must be strictly positive.
  * @param cache
  *   A pointer to a mempool cache structure. May be NULL if not needed.
- * @param is_mc
- *   Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
  * @return
  *   - >=0: Success; number of objects supplied.
  *   - <0: Error; code of ring dequeue function.
  */
 static inline int __attribute__((always_inline))
 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
-                       unsigned n, struct rte_mempool_cache *cache, int is_mc)
+                       unsigned n, struct rte_mempool_cache *cache, int flags)
 {
        int ret;
-       ret = __mempool_generic_get(mp, obj_table, n, cache, is_mc);
+       ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
        return ret;
@@ -1075,8 +1078,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
        struct rte_mempool_cache *cache;

        cache = rte_mempool_default_cache(mp, rte_lcore_id());
-       return rte_mempool_generic_get(mp, obj_table, n, cache,
-                                      !(mp->flags & MEMPOOL_F_SC_GET));
+       return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
 }

 /**
-- 
1.9.1
