Zero-copy access to mempool caches is beneficial for PMD performance, and
must be provided by the mempool library to fix [Bug 1052] without a
performance regression.

[Bug 1052]: https://bugs.dpdk.org/show_bug.cgi?id=1052
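
To illustrate the intended usage: a PMD can write object pointers
directly into, or read them directly from, the lcore's mempool cache,
instead of going through an intermediate array. A minimal sketch,
assuming a hypothetical mempool 'mp' and an array 'objs' of at least
32 object pointers; the fallback to rte_mempool_generic_put() is one
possible choice, not mandated by the API:

    struct rte_mempool_cache *cache =
            rte_mempool_default_cache(mp, rte_lcore_id());
    void **cache_objs;

    /* Zero-copy put: reserve room in the cache, then write directly. */
    cache_objs = rte_mempool_cache_zc_put_bulk(cache, mp, 32);
    if (cache_objs != NULL)
            rte_memcpy(cache_objs, objs, 32 * sizeof(void *));
    else
            /* Request too big for the cache; use the copying API. */
            rte_mempool_generic_put(mp, objs, 32, cache);

    /* Zero-copy get: read object pointers directly from the cache. */
    cache_objs = rte_mempool_cache_zc_get_bulk(cache, mp, 32);
    if (cache_objs != NULL)
            rte_memcpy(objs, cache_objs, 32 * sizeof(void *));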

v4:
* Fix checkpatch warnings.
v3:
* Bugfix: Respect the cache size; compare to the flush threshold instead
  of RTE_MEMPOOL_CACHE_MAX_SIZE.
* Added 'rewind' function for incomplete 'put' operations. (Konstantin)
  See the usage sketch after the changelog.
* Replace RTE_ASSERTs with runtime checks of the request size.
  Instead of failing, return NULL if the request is too big. (Konstantin)
* Modified comparison to prevent overflow if n is really huge and len is
  non-zero.
* Updated the comments in the code.
v2:
* Fix checkpatch warnings.
* Fix missing registration of trace points.
* The functions are inline, so they don't go into the map file.
v1 changes from the RFC:
* Removed run-time parameter checks. (Honnappa)
  These are hot fast path functions; correct application behaviour is
  required, i.e. function parameters must be valid.
* Added RTE_ASSERT for parameters instead.
  Code for this is only generated if built with RTE_ENABLE_ASSERT.
* Removed fallback when 'cache' parameter is not set. (Honnappa)
* Chose the simple get function; i.e. do not move the existing objects in
  the cache to the top of the new stack, just leave them at the bottom.
* Renamed the functions. Other suggestions are welcome, of course. ;-)
* Updated the function descriptions.
* Added the functions to trace_fp and version.map.
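
On the 'rewind' function (v3 above): if fewer objects than reserved end
up being written, the reservation can be shrunk afterwards. A sketch;
the 'fill_objects' helper is hypothetical, standing in for whatever
writes the object pointers:

    /* Reserve room for up to 32 objects in the cache. */
    void **cache_objs = rte_mempool_cache_zc_put_bulk(cache, mp, 32);
    if (cache_objs != NULL) {
            /* Hypothetical helper; may write fewer than 32 pointers. */
            unsigned int actual = fill_objects(cache_objs, 32);
            /* Un-put the entries that were not actually written. */
            rte_mempool_cache_zc_put_rewind(cache, 32 - actual);
    }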

Signed-off-by: Morten Brørup <m...@smartsharesystems.com>
---
 lib/mempool/mempool_trace_points.c |   9 ++
 lib/mempool/rte_mempool.h          | 165 +++++++++++++++++++++++++++++
 lib/mempool/rte_mempool_trace_fp.h |  23 ++++
 lib/mempool/version.map            |   5 +
 4 files changed, 202 insertions(+)

diff --git a/lib/mempool/mempool_trace_points.c b/lib/mempool/mempool_trace_points.c
index 4ad76deb34..83d353a764 100644
--- a/lib/mempool/mempool_trace_points.c
+++ b/lib/mempool/mempool_trace_points.c
@@ -77,3 +77,12 @@ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_free,
 
 RTE_TRACE_POINT_REGISTER(rte_mempool_trace_set_ops_byname,
        lib.mempool.set.ops.byname)
+
+RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_zc_put_bulk,
+       lib.mempool.cache.zc.put.bulk)
+
+RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_zc_put_rewind,
+       lib.mempool.cache.zc.put.rewind)
+
+RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_zc_get_bulk,
+       lib.mempool.cache.zc.get.bulk)
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 9f530db24b..00387e7543 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -47,6 +47,7 @@
 #include <rte_ring.h>
 #include <rte_memcpy.h>
 #include <rte_common.h>
+#include <rte_errno.h>
 
 #include "rte_mempool_trace_fp.h"
 
@@ -1346,6 +1347,170 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
        cache->len = 0;
 }
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Zero-copy put objects in a user-owned mempool cache backed by the specified mempool.
+ *
+ * @param cache
+ *   A pointer to the mempool cache.
+ * @param mp
+ *   A pointer to the mempool.
+ * @param n
+ *   The number of objects to be put in the mempool cache.
+ * @return
+ *   The pointer to where to put the objects in the mempool cache.
+ *   NULL if the request itself is too big for the cache, i.e.
+ *   exceeds the cache flush threshold.
+ */
+__rte_experimental
+static __rte_always_inline void *
+rte_mempool_cache_zc_put_bulk(struct rte_mempool_cache *cache,
+               struct rte_mempool *mp,
+               unsigned int n)
+{
+       void **cache_objs;
+
+       RTE_ASSERT(cache != NULL);
+       RTE_ASSERT(mp != NULL);
+
+       rte_mempool_trace_cache_zc_put_bulk(cache, mp, n);
+
+       if (n <= cache->flushthresh - cache->len) {
+               /*
+                * The objects can be added to the cache without crossing the
+                * flush threshold.
+                */
+               cache_objs = &cache->objs[cache->len];
+               cache->len += n;
+       } else if (likely(n <= cache->flushthresh)) {
+               /*
+                * The request itself fits into the cache.
+                * But first, the cache must be flushed to the backend, so
+                * adding the objects does not cross the flush threshold.
+                */
+               cache_objs = &cache->objs[0];
+               rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
+               cache->len = n;
+       } else {
+               /* The request itself is too big for the cache. */
+               return NULL;
+       }
+
+       RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+       RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
+       return cache_objs;
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Zero-copy un-put objects in a user-owned mempool cache.
+ *
+ * @param cache
+ *   A pointer to the mempool cache.
+ * @param n
+ *   The number of objects not put in the mempool cache after calling
+ *   rte_mempool_cache_zc_put_bulk().
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_mempool_cache_zc_put_rewind(struct rte_mempool_cache *cache,
+               unsigned int n)
+{
+       RTE_ASSERT(cache != NULL);
+       RTE_ASSERT(n <= cache->len);
+
+       rte_mempool_trace_cache_zc_put_rewind(cache, n);
+
+       cache->len -= n;
+
+       RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, (int)-n);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
+ *
+ * Zero-copy get objects from a user-owned mempool cache backed by the specified mempool.
+ *
+ * @param cache
+ *   A pointer to the mempool cache.
+ * @param mp
+ *   A pointer to the mempool.
+ * @param n
+ *   The number of objects to prefetch into the mempool cache.
+ * @return
+ *   The pointer to the objects in the mempool cache.
+ *   NULL on error, i.e. the cache plus the backend pool does not contain
+ *   'n' objects. In that case, rte_errno is set to the error code of the
+ *   mempool dequeue function, or to EINVAL if the request itself is too
+ *   big for the cache, i.e. exceeds the cache flush threshold.
+ */
+__rte_experimental
+static __rte_always_inline void *
+rte_mempool_cache_zc_get_bulk(struct rte_mempool_cache *cache,
+               struct rte_mempool *mp,
+               unsigned int n)
+{
+       unsigned int len;
+
+       RTE_ASSERT(cache != NULL);
+       RTE_ASSERT(mp != NULL);
+
+       rte_mempool_trace_cache_zc_get_bulk(cache, mp, n);
+
+       len = cache->len;
+
+       if (n <= len) {
+               /* The request can be satisfied from the cache as is. */
+               len -= n;
+       } else if (likely(n <= cache->flushthresh)) {
+               /*
+                * The request itself can be satisfied from the cache.
+                * But first, the cache must be filled from the backend;
+                * fetch size + requested - len objects.
+                */
+               int ret;
+               const unsigned int size = cache->size;
+
+       ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], size + n - len);
+               if (unlikely(ret < 0)) {
+                       /*
+                        * We are buffer constrained.
+                        * Do not fill the cache, just satisfy the request.
+                        */
+                       ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], n - len);
+                       if (unlikely(ret < 0)) {
+                               /* Unable to satisfy the request. */
+
+                               RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+                               RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+
+                               rte_errno = -ret;
+                               return NULL;
+                       }
+
+                       len = 0;
+               } else
+                       len = size;
+       } else {
+               /* The request itself is too big for the cache. */
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       cache->len = len;
+
+       RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
+       RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
+
+       return &cache->objs[len];
+}
+
 /**
  * @internal Put several objects back in the mempool; used internally.
  * @param mp
diff --git a/lib/mempool/rte_mempool_trace_fp.h b/lib/mempool/rte_mempool_trace_fp.h
index ed060e887c..14666457f7 100644
--- a/lib/mempool/rte_mempool_trace_fp.h
+++ b/lib/mempool/rte_mempool_trace_fp.h
@@ -109,6 +109,29 @@ RTE_TRACE_POINT_FP(
        rte_trace_point_emit_ptr(mempool);
 )
 
+RTE_TRACE_POINT_FP(
+       rte_mempool_trace_cache_zc_put_bulk,
+       RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
+       rte_trace_point_emit_ptr(cache);
+       rte_trace_point_emit_ptr(mempool);
+       rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+       rte_mempool_trace_cache_zc_put_rewind,
+       RTE_TRACE_POINT_ARGS(void *cache, uint32_t nb_objs),
+       rte_trace_point_emit_ptr(cache);
+       rte_trace_point_emit_u32(nb_objs);
+)
+
+RTE_TRACE_POINT_FP(
+       rte_mempool_trace_cache_zc_get_bulk,
+       RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
+       rte_trace_point_emit_ptr(cache);
+       rte_trace_point_emit_ptr(mempool);
+       rte_trace_point_emit_u32(nb_objs);
+)
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/mempool/version.map b/lib/mempool/version.map
index b67d7aace7..1383ae6db2 100644
--- a/lib/mempool/version.map
+++ b/lib/mempool/version.map
@@ -63,6 +63,11 @@ EXPERIMENTAL {
        __rte_mempool_trace_ops_alloc;
        __rte_mempool_trace_ops_free;
        __rte_mempool_trace_set_ops_byname;
+
+       # added in 23.03
+       __rte_mempool_trace_cache_zc_put_bulk;
+       __rte_mempool_trace_cache_zc_put_rewind;
+       __rte_mempool_trace_cache_zc_get_bulk;
 };
 
 INTERNAL {
-- 
2.17.1
