[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-06-14 Thread Lazaros Koromilas
Hi Olivier,

I have it in my queue, I'll do my best to have it before the deadline.

Thanks!
Lazaros.

On Mon, Jun 13, 2016 at 1:21 PM, Olivier Matz  wrote:
> Hi Lazaros,
>
> On 05/11/2016 11:56 AM, Olivier MATZ wrote:
>> Hi Lazaros,
>>
>> Sorry for the late review. Please find some comments,
>> in addition to what Konstantin already said.
>>
>
> Will you have time to send a v3 before the integration
> deadline at the end of the week?
>
> I think it should be rebased on top of latest mempool series
> from David Hunt:
> http://dpdk.org/ml/archives/dev/2016-June/040897.html
>
> Regards,
> Olivier


[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-06-13 Thread Olivier Matz
Hi Lazaros,

On 05/11/2016 11:56 AM, Olivier MATZ wrote:
> Hi Lazaros,
> 
> Sorry for the late review. Please find some comments,
> in addition to what Konstantin already said.
> 

Will you have time to send a v3 before the integration
deadline at the end of the week?

I think it should be rebased on top of latest mempool series
from David Hunt:
http://dpdk.org/ml/archives/dev/2016-June/040897.html

Regards,
Olivier


[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-05-11 Thread Olivier MATZ
Hi Lazaros,

Sorry for the late review. Please find some comments,
in addition to what Konstantin already said.

On 04/04/2016 05:43 PM, Lazaros Koromilas wrote:

> --- a/app/test/test_mempool.c
> +++ b/app/test/test_mempool.c
> @@ -79,6 +79,7 @@
>
>   static struct rte_mempool *mp;
>   static struct rte_mempool *mp_cache, *mp_nocache;
> +static int use_external_cache;
>
>   static rte_atomic32_t synchro;
>
> @@ -107,19 +108,33 @@ test_mempool_basic(void)
>   char *obj_data;
>   int ret = 0;
>   unsigned i, j;
> + struct rte_mempool_cache *cache;
> +
> + if (use_external_cache)
> + /* Create a user-owned mempool cache. */
> + cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
> +  SOCKET_ID_ANY);
> + else
> + cache = rte_mempool_default_cache(mp, rte_lcore_id());

Shouldn't we return an error if rte_mempool_default_cache()
fails? Even though the cache can be NULL for get/put, it would
crash on the flush() operation, so it's better to return an
error if the cache cannot be allocated.

I also think the resource should be freed on error, maybe
by doing "goto fail" instead of "return -1" in the subsequent
checks. Note that I also reworked this test in my patchset, see:
http://dpdk.org/dev/patchwork/patch/12069/
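For illustration, a rough sketch of both suggestions at once (hypothetical,
heavily trimmed test code; the "fail" label and cleanup path are assumptions):

    static int
    test_mempool_basic(void)
    {
        void *obj;
        int ret = -1;
        struct rte_mempool_cache *cache;

        if (use_external_cache)
            cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
                                             SOCKET_ID_ANY);
        else
            cache = rte_mempool_default_cache(mp, rte_lcore_id());
        if (cache == NULL)  /* report the allocation failure */
            return -1;

        if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
            goto fail;      /* instead of "return -1" */
        rte_mempool_generic_put(mp, &obj, 1, cache, 1);
        ret = 0;

    fail:
        if (use_external_cache) {
            rte_mempool_cache_flush(cache, mp);
            rte_mempool_cache_free(cache);
        }
        return ret;
    }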

I think the "use_external_cache" parameter should be a parameter
instead of a global variable, like I've done for the mempool pointer.
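Something like this, for example (hypothetical prototype and call sites):

    static int test_mempool_basic(struct rte_mempool *mp,
                                  int use_external_cache);

    if (test_mempool_basic(mp_cache, 0) < 0)
        return -1;
    if (test_mempool_basic(mp_nocache, 1) < 0)
        return -1;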


> --- a/app/test/test_mempool_perf.c
> +++ b/app/test/test_mempool_perf.c
> @@ -98,6 +101,8 @@
>
>   static struct rte_mempool *mp;
>   static struct rte_mempool *mp_cache, *mp_nocache;
> +static int use_external_cache;
> +static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
>
>   static rte_atomic32_t synchro;
>

The same comment (global vs parameter) could apply here, but it would
require reworking the full test file... so maybe it's off topic.


> @@ -137,6 +142,14 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
>   int ret;
>   uint64_t start_cycles, end_cycles;
>   uint64_t time_diff = 0, hz = rte_get_timer_hz();
> + struct rte_mempool_cache *cache;
> +
> + if (use_external_cache)
> + /* Create a user-owned mempool cache. */
> + cache = rte_mempool_cache_create(external_cache_size,
> +  SOCKET_ID_ANY);
> + else
> + cache = rte_mempool_default_cache(mp, lcore_id);
>
>   /* n_get_bulk and n_put_bulk must be divisors of n_keep */
>   if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)

Same comments as above (check that the return value != NULL).

The cache creation could be moved a few lines down to avoid
having to free the resource on error.
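For example (a sketch; it assumes the divisor checks are the only early
returns in per_lcore_mempool_test):

    /* n_get_bulk and n_put_bulk must be divisors of n_keep */
    if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
        return -1;
    if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
        return -1;

    /* allocate only once nothing above can bail out early */
    if (use_external_cache)
        cache = rte_mempool_cache_create(external_cache_size,
                                         SOCKET_ID_ANY);
    else
        cache = rte_mempool_default_cache(mp, lcore_id);
    if (cache == NULL)
        return -1;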


> --- a/lib/librte_eal/common/eal_common_log.c
> +++ b/lib/librte_eal/common/eal_common_log.c
> @@ -125,7 +125,7 @@ rte_log_add_in_history(const char *buf, size_t size)
>   }
>   }
>   else {
> - if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
> + if (rte_mempool_get(log_history_mp, &obj) < 0)
>   obj = NULL;
>   hist_buf = obj;
>   }

After seeing many changes like this, I wonder if it's possible
to move these in a separate commit:
"mempool: deprecate specific get/put functions"

It would remove some noise in the "interesting" part. I suggest
the following order:
   mempool: deprecate specific get/put functions
   mempool: use bit flags instead of is_mp and is_mc
   mempool: allow for user-owned mempool caches

What do you think?


>   static inline void __attribute__((always_inline))
> -__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> - unsigned n, int is_mp)
> +__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
> +   unsigned n, struct rte_mempool_cache *cache, int is_mp)
>   {
> - struct rte_mempool_cache *cache;
>   uint32_t index;
>   void **cache_objs;
> - unsigned lcore_id = rte_lcore_id();
> - uint32_t cache_size = mp->cache_size;
> - uint32_t flushthresh = mp->cache_flushthresh;
>
>   /* increment stat now, adding in mempool always success */
>   __MEMPOOL_STAT_ADD(mp, put, n);
>
> - /* cache is not enabled or single producer or non-EAL thread */
> - if (unlikely(cache_size == 0 || is_mp == 0 ||
> -  lcore_id >= RTE_MAX_LCORE))
> + /* No cache provided or cache is not enabled or single producer */
> + if (unlikely(cache == NULL || cache->size == 0 || is_mp == 0))
>   goto ring_enqueue;

Is it possible that cache->size == 0?
I suggest removing that test and ensuring that size != 0 at cache
creation.
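I.e. the creation-time check could become something like (sketch):

    if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
        rte_errno = EINVAL;
        return NULL;
    }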

> -__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
> - unsigned n, int is_mp)

> +__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
> +   unsigned n, struct rte_mempool_cache *cache, int is_mp)

[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-04-19 Thread Lazaros Koromilas
Hi Konstantin,
Thanks for the review.

Regards,
Lazaros.

On Mon, Apr 18, 2016 at 4:17 PM, Ananyev, Konstantin  wrote:
> Hi Lazaros,
>
> Looks ok to me in general, a few comments below.
> One more generic question - did you observe any performance impact
> caused by these changes?
> Konstantin

I didn't observe any notable difference compared to the default
per-lcore cache case. Here is an excerpt from the mempool_perf test:

$ egrep '(^start|n_get_bulk=32 n_put_bulk=32 n_keep=128)' x86_64-native-linuxapp-gcc.log
start performance test (without cache)
mempool_autotest cache=0 cores=1 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=714958438
mempool_autotest cache=0 cores=2 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=795738111
mempool_autotest cache=0 cores=4 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=313655295
start performance test (with cache)
mempool_autotest cache=512 cores=1 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=780455116
mempool_autotest cache=512 cores=2 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=1046937599
mempool_autotest cache=512 cores=4 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=1988362238
start performance test (with user-owned cache)
mempool_autotest cache=512 cores=1 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=787519897
mempool_autotest cache=512 cores=2 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=1047029350
mempool_autotest cache=512 cores=4 n_get_bulk=32 n_put_bulk=32 n_keep=128 rate_persec=1965896498

>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Lazaros Koromilas
>> Sent: Monday, April 04, 2016 4:43 PM
>> To: dev at dpdk.org
>> Subject: [dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches
>>
>> The mempool cache is only available to EAL threads as a per-lcore
>> resource. Change this so that the user can create and provide their own
>> cache on mempool get and put operations. This works with non-EAL threads
>> too. This commit introduces the new API calls:
>>
>> rte_mempool_cache_create(size, socket_id)
>> rte_mempool_cache_flush(cache, mp)
>> rte_mempool_cache_free(cache)
>> rte_mempool_default_cache(mp, lcore_id)
>> rte_mempool_generic_put(mp, obj_table, n, cache, is_mp)
>> rte_mempool_generic_get(mp, obj_table, n, cache, is_mc)
>>
>> Removes the API calls:
>>
>> rte_mempool_sp_put_bulk(mp, obj_table, n)
>> rte_mempool_sc_get_bulk(mp, obj_table, n)
>> rte_mempool_sp_put(mp, obj)
>> rte_mempool_sc_get(mp, obj)
>
>
> Hmm, shouldn't we deprecate them first for a release before removing them
> completely? Let's say, for now you could just make them macros that call
> the remaining functions or so.

How do we mark the calls as deprecated? The librte_compat stuff doesn't
apply here, as we don't have a different version of the same symbol or
anything like that. Do I need to put them in a notice?

>
>>
>> And the remaining API calls use the per-lcore default local cache:
>>
>> rte_mempool_put_bulk(mp, obj_table, n)
>> rte_mempool_get_bulk(mp, obj_table, n)
>> rte_mempool_put(mp, obj)
>> rte_mempool_get(mp, obj)
>>
>> Signed-off-by: Lazaros Koromilas 
>> ---
>>  app/test/test_mempool.c|  58 +--
>>  app/test/test_mempool_perf.c   |  46 +-
>>  lib/librte_eal/common/eal_common_log.c |   8 +-
>>  lib/librte_mempool/rte_mempool.c   |  76 -
>>  lib/librte_mempool/rte_mempool.h   | 291 +
>>  5 files changed, 275 insertions(+), 204 deletions(-)
>>
>>
>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
>> index 73ca770..4d977c1 100644
>> --- a/lib/librte_mempool/rte_mempool.c
>> +++ b/lib/librte_mempool/rte_mempool.c
>> @@ -375,6 +375,63 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
>>   return usz;
>>  }
>>
>> +static void
>> +mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
>> +{
>> + cache->size = size;
>> + cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
>> + cache->len = 0;
>> +}
>> +
>> +/*
>> + * Create and initialize a cache for objects that are retrieved from and
>> + * returned to an underlying mempool. This structure is identical to the
>> + * local_cache[lcore_id] pointed to by the mempool structure.
>> + */
>> +struct rte_mempool_cache *
>> +rte_mempool_cache_create(uint32_t size, int socket_id)
>> +{
>> + struct rte_mempool_cache *cache;
>> +
>> + if (

[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-04-19 Thread Thomas Monjalon
2016-04-19 18:39, Lazaros Koromilas:
> >> Removes the API calls:
> >>
> >> rte_mempool_sp_put_bulk(mp, obj_table, n)
> >> rte_mempool_sc_get_bulk(mp, obj_table, n)
> >> rte_mempool_sp_put(mp, obj)
> >> rte_mempool_sc_get(mp, obj)
> >
> > Hmm, shouldn't we deprecate them first for a release before removing them
> > completely? Let's say, for now you could just make them macros that call
> > the remaining functions or so.
> 
> How do we mark the calls as deprecated? The librte_compat stuff doesn't
> apply here, as we don't have a different version of the same symbol or
> anything like that. Do I need to put them in a notice?

Yes, a deprecation notice, and add __rte_deprecated.
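For example, a transition shim kept for one release might look like this
(a sketch; the exact mapping onto the generic call is an assumption):

    /* deprecated, use rte_mempool_generic_put() instead */
    __rte_deprecated
    static inline void
    rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                            unsigned n)
    {
        rte_mempool_generic_put(mp, obj_table, n,
                                rte_mempool_default_cache(mp, rte_lcore_id()),
                                0 /* is_mp: single producer */);
    }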



[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-04-18 Thread Ananyev, Konstantin
Hi Lazaros,

Looks ok to me in general, a few comments below.
One more generic question - did you observe any performance impact 
caused by these changes?
Konstantin

> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Lazaros Koromilas
> Sent: Monday, April 04, 2016 4:43 PM
> To: dev at dpdk.org
> Subject: [dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches
> 
> The mempool cache is only available to EAL threads as a per-lcore
> resource. Change this so that the user can create and provide their own
> cache on mempool get and put operations. This works with non-EAL threads
> too. This commit introduces the new API calls:
> 
> rte_mempool_cache_create(size, socket_id)
> rte_mempool_cache_flush(cache, mp)
> rte_mempool_cache_free(cache)
> rte_mempool_default_cache(mp, lcore_id)
> rte_mempool_generic_put(mp, obj_table, n, cache, is_mp)
> rte_mempool_generic_get(mp, obj_table, n, cache, is_mc)
> 
> Removes the API calls:
> 
> rte_mempool_sp_put_bulk(mp, obj_table, n)
> rte_mempool_sc_get_bulk(mp, obj_table, n)
> rte_mempool_sp_put(mp, obj)
> rte_mempool_sc_get(mp, obj)


Hmm, shouldn't we deprecate them first for a release before removing them
completely? Let's say, for now you could just make them macros that call
the remaining functions or so.

> 
> And the remaining API calls use the per-lcore default local cache:
> 
> rte_mempool_put_bulk(mp, obj_table, n)
> rte_mempool_get_bulk(mp, obj_table, n)
> rte_mempool_put(mp, obj)
> rte_mempool_get(mp, obj)
> 
> Signed-off-by: Lazaros Koromilas 
> ---
>  app/test/test_mempool.c|  58 +--
>  app/test/test_mempool_perf.c   |  46 +-
>  lib/librte_eal/common/eal_common_log.c |   8 +-
>  lib/librte_mempool/rte_mempool.c   |  76 -
>  lib/librte_mempool/rte_mempool.h   | 291 +
>  5 files changed, 275 insertions(+), 204 deletions(-)
> 
> 
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index 73ca770..4d977c1 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -375,6 +375,63 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
>   return usz;
>  }
> 
> +static void
> +mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
> +{
> + cache->size = size;
> + cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
> + cache->len = 0;
> +}
> +
> +/*
> + * Create and initialize a cache for objects that are retrieved from and
> + * returned to an underlying mempool. This structure is identical to the
> + * local_cache[lcore_id] pointed to by the mempool structure.
> + */
> +struct rte_mempool_cache *
> +rte_mempool_cache_create(uint32_t size, int socket_id)
> +{
> + struct rte_mempool_cache *cache;
> +
> + if (size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
> + rte_errno = EINVAL;
> + return NULL;
> + }
> +
> + cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
> +RTE_CACHE_LINE_SIZE, socket_id);
> + if (cache == NULL) {
> + RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache!\n");
> + rte_errno = ENOMEM;
> + return NULL;
> + }
> +
> + mempool_cache_init(cache, size);
> +
> + return cache;
> +}
> +
> +/*
> + * Free a cache. It's the responsibility of the user to make sure that any
> + * remaining objects in the cache are flushed to the corresponding
> + * mempool.
> + */
> +void
> +rte_mempool_cache_free(struct rte_mempool_cache *cache)
> +{
> + rte_free(cache);
> +}
> +
> +/*
> + * Put all objects in the cache to the specified mempool's ring.
> + */
> +void
> +rte_mempool_cache_flush(struct rte_mempool_cache *cache,
> + struct rte_mempool *mp)
> +{
> + rte_ring_enqueue_bulk(mp->ring, cache->objs, cache->len);

Shouldn't you also reset cache->len here?
cache->len = 0;
Another thought - maybe that function deserves to be an inline one.
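I.e. a sketch of the suggested version:

    static inline void
    rte_mempool_cache_flush(struct rte_mempool_cache *cache,
                            struct rte_mempool *mp)
    {
        rte_ring_enqueue_bulk(mp->ring, cache->objs, cache->len);
        cache->len = 0;  /* the cache is now empty */
    }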

> +}
> +
>  #ifndef RTE_LIBRTE_XEN_DOM0
>  /* stub if DOM0 support not configured */
>  struct rte_mempool *
> @@ -448,6 +505,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
>   struct rte_mempool_objsz objsz;
>   void *startaddr;
>   int page_size = getpagesize();
> + unsigned lcore_id;
> 
>   /* compilation-time checks */
>   RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
> @@ -583,8 +641,8 @@ rte_mempool_xmem_create(

[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-04-05 Thread Lazaros Koromilas
Hi all,

I forgot to mention that this series applies on top of:

http://www.dpdk.org/dev/patchwork/patch/10492/

Thanks,
Lazaros.

On Mon, Apr 4, 2016 at 6:43 PM, Lazaros Koromilas  wrote:
> The mempool cache is only available to EAL threads as a per-lcore
> resource. Change this so that the user can create and provide their own
> cache on mempool get and put operations. This works with non-EAL threads
> too. This commit introduces the new API calls:
>
> rte_mempool_cache_create(size, socket_id)
> rte_mempool_cache_flush(cache, mp)
> rte_mempool_cache_free(cache)
> rte_mempool_default_cache(mp, lcore_id)
> rte_mempool_generic_put(mp, obj_table, n, cache, is_mp)
> rte_mempool_generic_get(mp, obj_table, n, cache, is_mc)
>
> Removes the API calls:
>
> rte_mempool_sp_put_bulk(mp, obj_table, n)
> rte_mempool_sc_get_bulk(mp, obj_table, n)
> rte_mempool_sp_put(mp, obj)
> rte_mempool_sc_get(mp, obj)
>
> And the remaining API calls use the per-lcore default local cache:
>
> rte_mempool_put_bulk(mp, obj_table, n)
> rte_mempool_get_bulk(mp, obj_table, n)
> rte_mempool_put(mp, obj)
> rte_mempool_get(mp, obj)
>
> Signed-off-by: Lazaros Koromilas 
> ---
>  app/test/test_mempool.c|  58 +--
>  app/test/test_mempool_perf.c   |  46 +-
>  lib/librte_eal/common/eal_common_log.c |   8 +-
>  lib/librte_mempool/rte_mempool.c   |  76 -
>  lib/librte_mempool/rte_mempool.h   | 291 +
>  5 files changed, 275 insertions(+), 204 deletions(-)
>
> diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
> index 10e1fa4..2dc0cf2 100644
> --- a/app/test/test_mempool.c
> +++ b/app/test/test_mempool.c
> @@ -79,6 +79,7 @@
>
>  static struct rte_mempool *mp;
>  static struct rte_mempool *mp_cache, *mp_nocache;
> +static int use_external_cache;
>
>  static rte_atomic32_t synchro;
>
> @@ -107,19 +108,33 @@ test_mempool_basic(void)
> char *obj_data;
> int ret = 0;
> unsigned i, j;
> +   struct rte_mempool_cache *cache;
> +
> +   if (use_external_cache)
> +   /* Create a user-owned mempool cache. */
> +   cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
> +SOCKET_ID_ANY);
> +   else
> +   cache = rte_mempool_default_cache(mp, rte_lcore_id());
>
> /* dump the mempool status */
> rte_mempool_dump(stdout, mp);
>
> printf("get an object\n");
> -   if (rte_mempool_get(mp, &obj) < 0)
> +   if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
> return -1;
> rte_mempool_dump(stdout, mp);
>
> /* tests that improve coverage */
> printf("get object count\n");
> -   if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
> -   return -1;
> +   if (use_external_cache) {
> +   /* We have to count the extra caches, one in this case. */
> +   if (rte_mempool_count(mp) + cache->len != MEMPOOL_SIZE - 1)
> +   return -1;
> +   } else {
> +   if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
> +   return -1;
> +   }
>
> printf("get private data\n");
> if (rte_mempool_get_priv(mp) != (char *)mp +
> @@ -134,21 +149,21 @@ test_mempool_basic(void)
> return -1;
>
> printf("put the object back\n");
> -   rte_mempool_put(mp, obj);
> +   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
> rte_mempool_dump(stdout, mp);
>
> printf("get 2 objects\n");
> -   if (rte_mempool_get(mp, &obj) < 0)
> +   if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
> return -1;
> -   if (rte_mempool_get(mp, &obj2) < 0) {
> -   rte_mempool_put(mp, obj);
> +   if (rte_mempool_generic_get(mp, &obj2, 1, cache, 1) < 0) {
> +   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
> return -1;
> }
> rte_mempool_dump(stdout, mp);
>
> printf("put the objects back\n");
> -   rte_mempool_put(mp, obj);
> -   rte_mempool_put(mp, obj2);
> +   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
> +   rte_mempool_generic_put(mp, &obj2, 1, cache, 1);
> rte_mempool_dump(stdout, mp);
>
> /*
> @@ -161,7 +176,7 @@ test_mempool_basic(void)
> }
>
> for (i=0; i<MEMPOOL_SIZE; i++) {
> -   if (rte_mempool_get(mp, &objtable[i]) < 0)
> +   if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 1) < 0)
> break;
> }
>
> @@ -183,13 +198,18 @@ test_mempool_basic(void)
> ret = -1;
> }
>
> -   rte_mempool_put(mp, objtable[i]);
> +   rte_mempool_generic_put(mp, &objtable[i], 1, cache, 1);
> }
>
> free(objtable);
> if (ret == -1)
> printf("objects were modified!\n");
>
> +   if 

[dpdk-dev] [PATCH v2 1/2] mempool: allow for user-owned mempool caches

2016-04-04 Thread Lazaros Koromilas
The mempool cache is only available to EAL threads as a per-lcore
resource. Change this so that the user can create and provide their own
cache on mempool get and put operations. This works with non-EAL threads
too. This commit introduces the new API calls:

rte_mempool_cache_create(size, socket_id)
rte_mempool_cache_flush(cache, mp)
rte_mempool_cache_free(cache)
rte_mempool_default_cache(mp, lcore_id)
rte_mempool_generic_put(mp, obj_table, n, cache, is_mp)
rte_mempool_generic_get(mp, obj_table, n, cache, is_mc)

Removes the API calls:

rte_mempool_sp_put_bulk(mp, obj_table, n)
rte_mempool_sc_get_bulk(mp, obj_table, n)
rte_mempool_sp_put(mp, obj)
rte_mempool_sc_get(mp, obj)

And the remaining API calls use the per-lcore default local cache:

rte_mempool_put_bulk(mp, obj_table, n)
rte_mempool_get_bulk(mp, obj_table, n)
rte_mempool_put(mp, obj)
rte_mempool_get(mp, obj)
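For completeness, a minimal usage sketch of the user-owned cache from a
non-EAL thread (error handling trimmed; the cache size of 32 is arbitrary):

    struct rte_mempool_cache *cache;
    void *obj;

    cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
    if (cache == NULL)
        return -1;

    if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) == 0) {
        /* ... use obj ... */
        rte_mempool_generic_put(mp, &obj, 1, cache, 1);
    }

    /* return any cached objects to the pool before freeing the cache */
    rte_mempool_cache_flush(cache, mp);
    rte_mempool_cache_free(cache);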

Signed-off-by: Lazaros Koromilas 
---
 app/test/test_mempool.c|  58 +--
 app/test/test_mempool_perf.c   |  46 +-
 lib/librte_eal/common/eal_common_log.c |   8 +-
 lib/librte_mempool/rte_mempool.c   |  76 -
 lib/librte_mempool/rte_mempool.h   | 291 +
 5 files changed, 275 insertions(+), 204 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 10e1fa4..2dc0cf2 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -79,6 +79,7 @@

 static struct rte_mempool *mp;
 static struct rte_mempool *mp_cache, *mp_nocache;
+static int use_external_cache;

 static rte_atomic32_t synchro;

@@ -107,19 +108,33 @@ test_mempool_basic(void)
char *obj_data;
int ret = 0;
unsigned i, j;
+   struct rte_mempool_cache *cache;
+
+   if (use_external_cache)
+   /* Create a user-owned mempool cache. */
+   cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
+SOCKET_ID_ANY);
+   else
+   cache = rte_mempool_default_cache(mp, rte_lcore_id());

/* dump the mempool status */
rte_mempool_dump(stdout, mp);

printf("get an object\n");
-   if (rte_mempool_get(mp, &obj) < 0)
+   if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
return -1;
rte_mempool_dump(stdout, mp);

/* tests that improve coverage */
printf("get object count\n");
-   if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
-   return -1;
+   if (use_external_cache) {
+   /* We have to count the extra caches, one in this case. */
+   if (rte_mempool_count(mp) + cache->len != MEMPOOL_SIZE - 1)
+   return -1;
+   } else {
+   if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
+   return -1;
+   }

printf("get private data\n");
if (rte_mempool_get_priv(mp) != (char *)mp +
@@ -134,21 +149,21 @@ test_mempool_basic(void)
return -1;

printf("put the object back\n");
-   rte_mempool_put(mp, obj);
+   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
rte_mempool_dump(stdout, mp);

printf("get 2 objects\n");
-   if (rte_mempool_get(mp, &obj) < 0)
+   if (rte_mempool_generic_get(mp, &obj, 1, cache, 1) < 0)
return -1;
-   if (rte_mempool_get(mp, &obj2) < 0) {
-   rte_mempool_put(mp, obj);
+   if (rte_mempool_generic_get(mp, &obj2, 1, cache, 1) < 0) {
+   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
return -1;
}
rte_mempool_dump(stdout, mp);

printf("put the objects back\n");
-   rte_mempool_put(mp, obj);
-   rte_mempool_put(mp, obj2);
+   rte_mempool_generic_put(mp, &obj, 1, cache, 1);
+   rte_mempool_generic_put(mp, &obj2, 1, cache, 1);
rte_mempool_dump(stdout, mp);

/*
@@ -161,7 +176,7 @@ test_mempool_basic(void)
}

for (i=0; i