Used the ring data structure to implement the buffer pool. The buffer structure was also simplified to enable a future driver interface. Every buffer includes a packet header, so each buffer can be used as a packet head or as a segment. Segmentation was disabled and the segment size was fixed to a large value (64 kB) to limit the number of modifications in this commit.
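For reference, a minimal sketch (not part of the patch) of how the ring backed pool and the per-thread cache introduced here are expected to interact on alloc/free. It reuses the names defined below (pool_t, pool_cache_t, ring_t, ring_enq(), CONFIG_POOL_CACHE_SIZE); ring_deq() and RING_EMPTY are assumed to be the dequeue counterparts provided by odp_ring_internal.h and are not shown in this diff.

/* Illustrative sketch, assuming odp_ring_internal.h provides ring_deq()
 * returning RING_EMPTY when the ring has no entries. */
static odp_buffer_t sketch_alloc(pool_t *pool, pool_cache_t *cache)
{
	ring_t *ring = &pool->ring.hdr;
	uint32_t mask = pool->ring_mask;
	uint32_t data;

	/* Fast path: pop the most recently freed handle from the
	 * thread local cache. */
	if (cache->num > 0)
		return cache->buf[--cache->num];

	/* Slow path: dequeue a buffer handle from the shared ring. */
	data = ring_deq(ring, mask);

	if (data == RING_EMPTY)
		return ODP_BUFFER_INVALID;

	return (odp_buffer_t)(uintptr_t)data;
}

static void sketch_free(pool_t *pool, pool_cache_t *cache, odp_buffer_t buf)
{
	/* Free into the local cache first; once the cache is full,
	 * handles go back to the global ring. */
	if (cache->num < CONFIG_POOL_CACHE_SIZE) {
		cache->buf[cache->num++] = buf;
		return;
	}

	ring_enq(&pool->ring.hdr, pool->ring_mask,
		 (uint32_t)(uintptr_t)buf);
}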
Signed-off-by: Petri Savolainen <petri.savolai...@nokia.com> --- .../include/odp/api/plat/pool_types.h | 6 - .../linux-generic/include/odp_buffer_inlines.h | 160 +-- .../linux-generic/include/odp_buffer_internal.h | 104 +- .../include/odp_classification_datamodel.h | 2 +- .../linux-generic/include/odp_config_internal.h | 34 +- .../linux-generic/include/odp_packet_internal.h | 13 +- platform/linux-generic/include/odp_pool_internal.h | 270 +--- .../linux-generic/include/odp_timer_internal.h | 4 - platform/linux-generic/odp_buffer.c | 8 - platform/linux-generic/odp_classification.c | 25 +- platform/linux-generic/odp_crypto.c | 4 +- platform/linux-generic/odp_packet.c | 99 +- platform/linux-generic/odp_pool.c | 1441 ++++++++------------ platform/linux-generic/odp_timer.c | 1 + platform/linux-generic/pktio/socket.c | 16 +- platform/linux-generic/pktio/socket_mmap.c | 10 +- test/common_plat/performance/odp_pktio_perf.c | 2 +- test/common_plat/performance/odp_scheduling.c | 8 +- test/common_plat/validation/api/packet/packet.c | 8 +- 19 files changed, 746 insertions(+), 1469 deletions(-) diff --git a/platform/linux-generic/include/odp/api/plat/pool_types.h b/platform/linux-generic/include/odp/api/plat/pool_types.h index 1ca8f02..4e39de5 100644 --- a/platform/linux-generic/include/odp/api/plat/pool_types.h +++ b/platform/linux-generic/include/odp/api/plat/pool_types.h @@ -39,12 +39,6 @@ typedef enum odp_pool_type_t { ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT, } odp_pool_type_t; -/** Get printable format of odp_pool_t */ -static inline uint64_t odp_pool_to_u64(odp_pool_t hdl) -{ - return _odp_pri(hdl); -} - /** * @} */ diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h index 2b1ab42..2f5eb88 100644 --- a/platform/linux-generic/include/odp_buffer_inlines.h +++ b/platform/linux-generic/include/odp_buffer_inlines.h @@ -18,43 +18,20 @@ extern "C" { #endif #include <odp_buffer_internal.h> -#include <odp_pool_internal.h> -static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr) -{ - odp_buffer_bits_t handle; - uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); - struct pool_entry_s *pool = get_pool_entry(pool_id); +odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf); +void _odp_buffer_event_type_set(odp_buffer_t buf, int ev); +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); - handle.handle = 0; - handle.pool_id = pool_id; - handle.index = ((uint8_t *)hdr - pool->pool_mdata_addr) / - ODP_CACHE_LINE_SIZE; - handle.seg = 0; - - return handle.handle; -} +void *buffer_map(odp_buffer_hdr_t *buf, uint32_t offset, uint32_t *seglen, + uint32_t limit); static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) { return hdr->handle.handle; } -static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - uint32_t pool_id; - uint32_t index; - struct pool_entry_s *pool; - - handle.handle = buf; - pool_id = handle.pool_id; - index = handle.index; - pool = get_pool_entry(pool_id); - - return (odp_buffer_hdr_t *)(void *) - (pool->pool_mdata_addr + (index * ODP_CACHE_LINE_SIZE)); -} +odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf); static inline uint32_t pool_id_from_buf(odp_buffer_t buf) { @@ -64,131 +41,6 @@ static inline uint32_t pool_id_from_buf(odp_buffer_t buf) return handle.pool_id; } -static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) -{ - odp_buffer_bits_t handle; - odp_buffer_hdr_t *buf_hdr; - handle.handle = buf; - - /* For buffer handles, segment 
index must be 0 and pool id in range */ - if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_POOLS) - return NULL; - - pool_entry_t *pool = - odp_pool_to_entry(_odp_cast_scalar(odp_pool_t, - handle.pool_id)); - - /* If pool not created, handle is invalid */ - if (pool->s.pool_shm == ODP_SHM_INVALID) - return NULL; - - uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; - - /* A valid buffer index must be on stride, and must be in range */ - if ((handle.index % buf_stride != 0) || - ((uint32_t)(handle.index / buf_stride) >= pool->s.params.buf.num)) - return NULL; - - buf_hdr = (odp_buffer_hdr_t *)(void *) - (pool->s.pool_mdata_addr + - (handle.index * ODP_CACHE_LINE_SIZE)); - - /* Handle is valid, so buffer is valid if it is allocated */ - return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr; -} - -int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); - -static inline void *buffer_map(odp_buffer_hdr_t *buf, - uint32_t offset, - uint32_t *seglen, - uint32_t limit) -{ - int seg_index; - int seg_offset; - - if (odp_likely(offset < buf->segsize)) { - seg_index = 0; - seg_offset = offset; - } else { - seg_index = offset / buf->segsize; - seg_offset = offset % buf->segsize; - } - if (seglen != NULL) { - uint32_t buf_left = limit - offset; - *seglen = seg_offset + buf_left <= buf->segsize ? - buf_left : buf->segsize - seg_offset; - } - - return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); -} - -static inline odp_buffer_seg_t segment_next(odp_buffer_hdr_t *buf, - odp_buffer_seg_t seg) -{ - odp_buffer_bits_t seghandle; - seghandle.handle = (odp_buffer_t)seg; - - if (seg == ODP_SEGMENT_INVALID || - seghandle.prefix != buf->handle.prefix || - seghandle.seg >= buf->segcount - 1) - return ODP_SEGMENT_INVALID; - else { - seghandle.seg++; - return (odp_buffer_seg_t)seghandle.handle; - } -} - -static inline void *segment_map(odp_buffer_hdr_t *buf, - odp_buffer_seg_t seg, - uint32_t *seglen, - uint32_t limit, - uint32_t hr) -{ - uint32_t seg_offset, buf_left; - odp_buffer_bits_t seghandle; - uint8_t *seg_addr; - seghandle.handle = (odp_buffer_t)seg; - - if (seghandle.prefix != buf->handle.prefix || - seghandle.seg >= buf->segcount) - return NULL; - - seg_addr = (uint8_t *)buf->addr[seghandle.seg]; - seg_offset = seghandle.seg * buf->segsize; - limit += hr; - - /* Can't map this segment if it's nothing but headroom or tailroom */ - if (hr >= seg_offset + buf->segsize || seg_offset > limit) - return NULL; - - /* Adjust address & offset if this segment contains any headroom */ - if (hr > seg_offset) { - seg_addr += hr % buf->segsize; - seg_offset += hr % buf->segsize; - } - - /* Set seglen if caller is asking for it */ - if (seglen != NULL) { - buf_left = limit - seg_offset; - *seglen = buf_left < buf->segsize ? buf_left : - (seg_offset >= buf->segsize ? 
buf->segsize : - buf->segsize - seg_offset); - } - - return (void *)seg_addr; -} - -static inline odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf) -{ - return odp_buf_to_hdr(buf)->event_type; -} - -static inline void _odp_buffer_event_type_set(odp_buffer_t buf, int ev) -{ - odp_buf_to_hdr(buf)->event_type = ev; -} - #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 1c09cd3..abe8591 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -33,72 +33,19 @@ extern "C" { #include <odp_schedule_if.h> #include <stddef.h> -#define ODP_BITSIZE(x) \ - ((x) <= 2 ? 1 : \ - ((x) <= 4 ? 2 : \ - ((x) <= 8 ? 3 : \ - ((x) <= 16 ? 4 : \ - ((x) <= 32 ? 5 : \ - ((x) <= 64 ? 6 : \ - ((x) <= 128 ? 7 : \ - ((x) <= 256 ? 8 : \ - ((x) <= 512 ? 9 : \ - ((x) <= 1024 ? 10 : \ - ((x) <= 2048 ? 11 : \ - ((x) <= 4096 ? 12 : \ - ((x) <= 8196 ? 13 : \ - ((x) <= 16384 ? 14 : \ - ((x) <= 32768 ? 15 : \ - ((x) <= 65536 ? 16 : \ - (0/0))))))))))))))))) - ODP_STATIC_ASSERT(ODP_CONFIG_PACKET_SEG_LEN_MIN >= 256, "ODP Segment size must be a minimum of 256 bytes"); -ODP_STATIC_ASSERT((ODP_CONFIG_PACKET_BUF_LEN_MAX % - ODP_CONFIG_PACKET_SEG_LEN_MIN) == 0, - "Packet max size must be a multiple of segment size"); - -#define ODP_BUFFER_MAX_SEG \ - (ODP_CONFIG_PACKET_BUF_LEN_MAX / ODP_CONFIG_PACKET_SEG_LEN_MIN) - -/* We can optimize storage of small raw buffers within metadata area */ -#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - 1)) - -#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_POOLS) -#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS) -#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + ODP_BUFFER_INDEX_BITS) -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) - -#define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) -#define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1) typedef union odp_buffer_bits_t { - odp_buffer_t handle; + odp_buffer_t handle; + union { - uint32_t u32; - struct { -#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN - uint32_t pool_id:ODP_BUFFER_POOL_BITS; - uint32_t index:ODP_BUFFER_INDEX_BITS; - uint32_t seg:ODP_BUFFER_SEG_BITS; -#else - uint32_t seg:ODP_BUFFER_SEG_BITS; - uint32_t index:ODP_BUFFER_INDEX_BITS; - uint32_t pool_id:ODP_BUFFER_POOL_BITS; -#endif - }; + uint32_t u32; struct { -#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN - uint32_t prefix:ODP_BUFFER_PREFIX_BITS; - uint32_t pfxseg:ODP_BUFFER_SEG_BITS; -#else - uint32_t pfxseg:ODP_BUFFER_SEG_BITS; - uint32_t prefix:ODP_BUFFER_PREFIX_BITS; -#endif + uint32_t pool_id: 8; + uint32_t index: 24; }; }; } odp_buffer_bits_t; @@ -125,7 +72,7 @@ struct odp_buffer_hdr_t { uint32_t sustain:1; /* Sustain order */ }; } flags; - int16_t allocator; /* allocating thread id */ + int8_t type; /* buffer type */ odp_event_type_t event_type; /* for reuse as event */ uint32_t size; /* max data size */ @@ -139,7 +86,8 @@ struct odp_buffer_hdr_t { uint32_t uarea_size; /* size of user area */ uint32_t segcount; /* segment count */ uint32_t segsize; /* segment size */ - void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ + /* block addrs */ + void *addr[ODP_CONFIG_PACKET_MAX_SEGS]; uint64_t order; /* sequence for ordered queues */ queue_entry_t *origin_qe; /* ordered queue origin */ union { @@ -149,39 +97,17 @@ struct 
odp_buffer_hdr_t { #ifdef _ODP_PKTIO_IPC /* ipc mapped process can not walk over pointers, * offset has to be used */ - uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG]; + uint64_t ipc_addr_offset[ODP_CONFIG_PACKET_MAX_SEGS]; #endif -}; - -/** @internal Compile time assert that the - * allocator field can handle any allocator id*/ -ODP_STATIC_ASSERT(INT16_MAX >= ODP_THREAD_COUNT_MAX, - "ODP_BUFFER_HDR_T__ALLOCATOR__SIZE_ERROR"); - -typedef struct odp_buffer_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))]; -} odp_buffer_hdr_stride; -typedef struct odp_buf_blk_t { - struct odp_buf_blk_t *next; - struct odp_buf_blk_t *prev; -} odp_buf_blk_t; - -/* Raw buffer header */ -typedef struct { - odp_buffer_hdr_t buf_hdr; /* common buffer header */ -} odp_raw_buffer_hdr_t; - -/* Free buffer marker */ -#define ODP_FREEBUF -1 + /* Data or next header */ + uint8_t data[0]; +}; /* Forward declarations */ -odp_buffer_t buffer_alloc(odp_pool_t pool, size_t size); -int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size, - odp_buffer_t buf[], int num); -void buffer_free(uint32_t pool_id, const odp_buffer_t buf); -void buffer_free_multi(uint32_t pool_id, - const odp_buffer_t buf[], int num_free); +int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num); +void buffer_free_multi(const odp_buffer_t buf[], int num_free); + int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount); void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount); int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount); diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h index dc2190d..8505c67 100644 --- a/platform/linux-generic/include/odp_classification_datamodel.h +++ b/platform/linux-generic/include/odp_classification_datamodel.h @@ -77,7 +77,7 @@ Class Of Service */ struct cos_s { queue_entry_t *queue; /* Associated Queue */ - pool_entry_t *pool; /* Associated Buffer pool */ + odp_pool_t pool; /* Associated Buffer pool */ union pmr_u *pmr[ODP_PMR_PER_COS_MAX]; /* Chained PMR */ union cos_u *linked_cos[ODP_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/ uint32_t valid; /* validity Flag */ diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h index e1bab20..e24d5ab 100644 --- a/platform/linux-generic/include/odp_config_internal.h +++ b/platform/linux-generic/include/odp_config_internal.h @@ -32,7 +32,7 @@ extern "C" { * This defines the minimum supported buffer alignment. Requests for values * below this will be rounded up to this value. */ -#define ODP_CONFIG_BUFFER_ALIGN_MIN 16 +#define ODP_CONFIG_BUFFER_ALIGN_MIN 64 /* * Maximum buffer alignment @@ -70,16 +70,7 @@ extern "C" { /* * Maximum number of segments per packet */ -#define ODP_CONFIG_PACKET_MAX_SEGS 6 - -/* - * Minimum packet segment length - * - * This defines the minimum packet segment buffer length in bytes. The user - * defined segment length (seg_len in odp_pool_param_t) will be rounded up into - * this value. - */ -#define ODP_CONFIG_PACKET_SEG_LEN_MIN 1598 +#define ODP_CONFIG_PACKET_MAX_SEGS 1 /* * Maximum packet segment length @@ -91,6 +82,15 @@ extern "C" { #define ODP_CONFIG_PACKET_SEG_LEN_MAX (64 * 1024) /* + * Minimum packet segment length + * + * This defines the minimum packet segment buffer length in bytes. The user + * defined segment length (seg_len in odp_pool_param_t) will be rounded up into + * this value. 
+ */ +#define ODP_CONFIG_PACKET_SEG_LEN_MIN ODP_CONFIG_PACKET_SEG_LEN_MAX + +/* * Maximum packet buffer length * * This defines the maximum number of bytes that can be stored into a packet @@ -102,7 +102,7 @@ extern "C" { * - The value MUST be an integral number of segments * - The value SHOULD be large enough to accommodate jumbo packets (9K) */ -#define ODP_CONFIG_PACKET_BUF_LEN_MAX (ODP_CONFIG_PACKET_SEG_LEN_MIN * 6) +#define ODP_CONFIG_PACKET_BUF_LEN_MAX ODP_CONFIG_PACKET_SEG_LEN_MAX /* Maximum number of shared memory blocks. * @@ -133,6 +133,16 @@ extern "C" { */ #define CONFIG_BURST_SIZE 16 +/* + * Maximum number of events in a pool + */ +#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024) + +/* + * Maximum number of events in a thread local pool cache + */ +#define CONFIG_POOL_CACHE_SIZE 256 + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index 392d670..8ad664b 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -174,11 +174,10 @@ typedef struct { odp_time_t timestamp; /**< Timestamp value */ odp_crypto_generic_op_result_t op_result; /**< Result for crypto */ -} odp_packet_hdr_t; -typedef struct odp_packet_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))]; -} odp_packet_hdr_stride; + /* Packet data storage */ + uint8_t data[0]; +} odp_packet_hdr_t; /** * Return the packet header @@ -233,7 +232,8 @@ static inline int push_head_seg(odp_packet_hdr_t *pkt_hdr, size_t len) (len - pkt_hdr->headroom + pkt_hdr->buf_hdr.segsize - 1) / pkt_hdr->buf_hdr.segsize; - if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG || + if (pkt_hdr->buf_hdr.segcount + extrasegs > + ODP_CONFIG_PACKET_MAX_SEGS || seg_alloc_head(&pkt_hdr->buf_hdr, extrasegs)) return -1; @@ -261,7 +261,8 @@ static inline int push_tail_seg(odp_packet_hdr_t *pkt_hdr, size_t len) (len - pkt_hdr->tailroom + pkt_hdr->buf_hdr.segsize - 1) / pkt_hdr->buf_hdr.segsize; - if (pkt_hdr->buf_hdr.segcount + extrasegs > ODP_BUFFER_MAX_SEG || + if (pkt_hdr->buf_hdr.segcount + extrasegs > + ODP_CONFIG_PACKET_MAX_SEGS || seg_alloc_tail(&pkt_hdr->buf_hdr, extrasegs)) return -1; diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index ca59ade..278c553 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -18,240 +18,78 @@ extern "C" { #endif -#include <odp/api/std_types.h> -#include <odp/api/align.h> -#include <odp_align_internal.h> -#include <odp/api/pool.h> -#include <odp_buffer_internal.h> -#include <odp/api/hints.h> -#include <odp_config_internal.h> -#include <odp/api/debug.h> #include <odp/api/shared_memory.h> -#include <odp/api/atomic.h> -#include <odp/api/thread.h> -#include <string.h> - -/** - * Buffer initialization routine prototype - * - * @note Routines of this type MAY be passed as part of the - * _odp_buffer_pool_init_t structure to be called whenever a - * buffer is allocated to initialize the user metadata - * associated with that buffer. - */ -typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); +#include <odp/api/ticketlock.h> -/** - * Buffer pool initialization parameters - * Used to communicate buffer pool initialization options. Internal for now. 
- */ -typedef struct _odp_buffer_pool_init_t { - size_t udata_size; /**< Size of user metadata for each buffer */ - _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */ - void *buf_init_arg; /**< Argument to be passed to buf_init() */ -} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */ - -#define POOL_MAX_LOCAL_CHUNKS 4 -#define POOL_CHUNK_SIZE (4 * CONFIG_BURST_SIZE) -#define POOL_MAX_LOCAL_BUFS (POOL_MAX_LOCAL_CHUNKS * POOL_CHUNK_SIZE) - -struct local_cache_s { - uint64_t bufallocs; /* Local buffer alloc count */ - uint64_t buffrees; /* Local buffer free count */ - - uint32_t num_buf; - odp_buffer_hdr_t *buf[POOL_MAX_LOCAL_BUFS]; -}; +#include <odp_buffer_internal.h> +#include <odp_config_internal.h> +#include <odp_ring_internal.h> -/* Local cache for buffer alloc/free acceleration */ -typedef struct local_cache_t { - union { - struct local_cache_s s; +typedef struct pool_cache_t { + uint32_t num; - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP( - sizeof(struct local_cache_s))]; - }; -} local_cache_t; + odp_buffer_t buf[CONFIG_POOL_CACHE_SIZE]; -#include <odp/api/plat/ticketlock_inlines.h> -#define POOL_LOCK(a) _odp_ticketlock_lock(a) -#define POOL_UNLOCK(a) _odp_ticketlock_unlock(a) -#define POOL_LOCK_INIT(a) odp_ticketlock_init(a) +} pool_cache_t ODP_ALIGNED_CACHE; -/** - * ODP Pool stats - Maintain some useful stats regarding pool utilization - */ +/* Buffer header ring */ typedef struct { - odp_atomic_u64_t bufallocs; /**< Count of successful buf allocs */ - odp_atomic_u64_t buffrees; /**< Count of successful buf frees */ - odp_atomic_u64_t blkallocs; /**< Count of successful blk allocs */ - odp_atomic_u64_t blkfrees; /**< Count of successful blk frees */ - odp_atomic_u64_t bufempty; /**< Count of unsuccessful buf allocs */ - odp_atomic_u64_t blkempty; /**< Count of unsuccessful blk allocs */ - odp_atomic_u64_t buf_high_wm_count; /**< Count of high buf wm conditions */ - odp_atomic_u64_t buf_low_wm_count; /**< Count of low buf wm conditions */ - odp_atomic_u64_t blk_high_wm_count; /**< Count of high blk wm conditions */ - odp_atomic_u64_t blk_low_wm_count; /**< Count of low blk wm conditions */ -} _odp_pool_stats_t; - -struct pool_entry_s { - odp_ticketlock_t lock ODP_ALIGNED_CACHE; - odp_ticketlock_t buf_lock; - odp_ticketlock_t blk_lock; - - char name[ODP_POOL_NAME_LEN]; - odp_pool_param_t params; - uint32_t udata_size; - odp_pool_t pool_hdl; - uint32_t pool_id; - odp_shm_t pool_shm; - union { - uint32_t all; - struct { - uint32_t has_name:1; - uint32_t user_supplied_shm:1; - uint32_t unsegmented:1; - uint32_t zeroized:1; - uint32_t predefined:1; - }; - } flags; - uint32_t quiesced; - uint32_t buf_low_wm_assert; - uint32_t blk_low_wm_assert; - uint8_t *pool_base_addr; - uint8_t *pool_mdata_addr; - size_t pool_size; - uint32_t buf_align; - uint32_t buf_stride; - odp_buffer_hdr_t *buf_freelist; - void *blk_freelist; - odp_atomic_u32_t bufcount; - odp_atomic_u32_t blkcount; - _odp_pool_stats_t poolstats; - uint32_t buf_num; - uint32_t seg_size; - uint32_t blk_size; - uint32_t buf_high_wm; - uint32_t buf_low_wm; - uint32_t blk_high_wm; - uint32_t blk_low_wm; - uint32_t headroom; - uint32_t tailroom; - - local_cache_t local_cache[ODP_THREAD_COUNT_MAX] ODP_ALIGNED_CACHE; -}; - -typedef union pool_entry_u { - struct pool_entry_s s; - - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; -} pool_entry_t; - -extern void *pool_entry_ptr[]; - -#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) -#define 
buffer_is_secure(buf) (buf->flags.zeroized) -#define pool_is_secure(pool) (pool->flags.zeroized) -#else -#define buffer_is_secure(buf) 0 -#define pool_is_secure(pool) 0 -#endif - -static inline void *get_blk(struct pool_entry_s *pool) -{ - void *myhead; - uint64_t blkcount; - - POOL_LOCK(&pool->blk_lock); - - myhead = pool->blk_freelist; - - if (odp_unlikely(myhead == NULL)) { - POOL_UNLOCK(&pool->blk_lock); - odp_atomic_inc_u64(&pool->poolstats.blkempty); - } else { - pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next; - POOL_UNLOCK(&pool->blk_lock); - blkcount = odp_atomic_fetch_sub_u32(&pool->blkcount, 1) - 1; - - /* Check for low watermark condition */ - if (blkcount == pool->blk_low_wm && !pool->blk_low_wm_assert) { - pool->blk_low_wm_assert = 1; - odp_atomic_inc_u64(&pool->poolstats.blk_low_wm_count); - } - - odp_atomic_inc_u64(&pool->poolstats.blkallocs); - } - - return myhead; -} - -static inline void ret_blk(struct pool_entry_s *pool, void *block) + /* Ring header */ + ring_t hdr; + + /* Ring data: buffer handles */ + uint32_t buf[CONFIG_POOL_MAX_NUM]; + +} pool_ring_t ODP_ALIGNED_CACHE; + +typedef struct pool_t { + odp_ticketlock_t lock ODP_ALIGNED_CACHE; + + char name[ODP_POOL_NAME_LEN]; + odp_pool_param_t params; + odp_pool_t pool_hdl; + uint32_t pool_idx; + uint32_t ring_mask; + odp_shm_t shm; + odp_shm_t uarea_shm; + int reserved; + uint32_t num; + uint32_t align; + uint32_t headroom; + uint32_t tailroom; + uint32_t data_size; + uint32_t max_len; + uint32_t max_seg_len; + uint32_t uarea_size; + uint32_t block_size; + uint32_t shm_size; + uint32_t uarea_shm_size; + uint8_t *base_addr; + uint8_t *uarea_base_addr; + + pool_cache_t local_cache[ODP_THREAD_COUNT_MAX]; + + pool_ring_t ring; + +} pool_t; + +pool_t *pool_entry(uint32_t pool_idx); + +static inline pool_t *odp_pool_to_entry(odp_pool_t pool_hdl) { - uint64_t blkcount; - - POOL_LOCK(&pool->blk_lock); - - ((odp_buf_blk_t *)block)->next = pool->blk_freelist; - pool->blk_freelist = block; - - POOL_UNLOCK(&pool->blk_lock); - - blkcount = odp_atomic_fetch_add_u32(&pool->blkcount, 1); - - /* Check if low watermark condition should be deasserted */ - if (blkcount == pool->blk_high_wm && pool->blk_low_wm_assert) { - pool->blk_low_wm_assert = 0; - odp_atomic_inc_u64(&pool->poolstats.blk_high_wm_count); - } - - odp_atomic_inc_u64(&pool->poolstats.blkfrees); -} - -static inline odp_pool_t pool_index_to_handle(uint32_t pool_id) -{ - return _odp_cast_scalar(odp_pool_t, pool_id); -} - -static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl) -{ - return _odp_typeval(pool_hdl); -} - -static inline void *get_pool_entry(uint32_t pool_id) -{ - return pool_entry_ptr[pool_id]; -} - -static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool) -{ - return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool)); -} - -static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) -{ - return odp_pool_to_entry(buf->pool_hdl); -} - -static inline uint32_t odp_buffer_pool_segment_size(odp_pool_t pool) -{ - return odp_pool_to_entry(pool)->s.seg_size; + return pool_entry(_odp_typeval(pool_hdl)); } static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool) { - return odp_pool_to_entry(pool)->s.headroom; + return odp_pool_to_entry(pool)->headroom; } static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool) { - return odp_pool_to_entry(pool)->s.tailroom; + return odp_pool_to_entry(pool)->tailroom; } -odp_pool_t _pool_create(const char *name, - odp_pool_param_t *params, - uint32_t shmflags); - #ifdef __cplusplus } #endif 
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index b1cd73f..91b12c5 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -35,8 +35,4 @@ typedef struct { odp_timer_t timer; } odp_timeout_hdr_t; -typedef struct odp_timeout_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))]; -} odp_timeout_hdr_stride; - #endif diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c index ce2fdba..0ddaf95 100644 --- a/platform/linux-generic/odp_buffer.c +++ b/platform/linux-generic/odp_buffer.c @@ -31,7 +31,6 @@ void *odp_buffer_addr(odp_buffer_t buf) return hdr->addr[0]; } - uint32_t odp_buffer_size(odp_buffer_t buf) { odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); @@ -39,12 +38,6 @@ uint32_t odp_buffer_size(odp_buffer_t buf) return hdr->size; } -int odp_buffer_is_valid(odp_buffer_t buf) -{ - return validate_buf(buf) != NULL; -} - - int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) { odp_buffer_hdr_t *hdr; @@ -72,7 +65,6 @@ int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) return len; } - void odp_buffer_print(odp_buffer_t buf) { int max_len = 512; diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c index ea223bf..99301af 100644 --- a/platform/linux-generic/odp_classification.c +++ b/platform/linux-generic/odp_classification.c @@ -16,7 +16,6 @@ #include <odp_classification_datamodel.h> #include <odp_classification_inlines.h> #include <odp_classification_internal.h> -#include <odp_pool_internal.h> #include <odp/api/shared_memory.h> #include <protocols/eth.h> #include <protocols/ip.h> @@ -159,7 +158,6 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) { int i, j; queue_entry_t *queue; - pool_entry_t *pool; odp_cls_drop_t drop_policy; /* Packets are dropped if Queue or Pool is invalid*/ @@ -168,11 +166,6 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) else queue = queue_to_qentry(param->queue); - if (param->pool == ODP_POOL_INVALID) - pool = NULL; - else - pool = odp_pool_to_entry(param->pool); - drop_policy = param->drop_policy; for (i = 0; i < ODP_COS_MAX_ENTRY; i++) { @@ -186,7 +179,7 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param) cos_tbl->cos_entry[i].s.linked_cos[j] = NULL; } cos_tbl->cos_entry[i].s.queue = queue; - cos_tbl->cos_entry[i].s.pool = pool; + cos_tbl->cos_entry[i].s.pool = param->pool; cos_tbl->cos_entry[i].s.flow_set = 0; cos_tbl->cos_entry[i].s.headroom = 0; cos_tbl->cos_entry[i].s.valid = 1; @@ -550,7 +543,7 @@ odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms, return id; } -int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id) +int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool) { cos_t *cos; @@ -560,10 +553,7 @@ int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool_id) return -1; } - if (pool_id == ODP_POOL_INVALID) - cos->s.pool = NULL; - else - cos->s.pool = odp_pool_to_entry(pool_id); + cos->s.pool = pool; return 0; } @@ -578,10 +568,7 @@ odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id) return ODP_POOL_INVALID; } - if (!cos->s.pool) - return ODP_POOL_INVALID; - - return cos->s.pool->s.pool_hdl; + return cos->s.pool; } int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr) @@ -827,10 +814,10 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t 
*base, if (cos == NULL) return -EINVAL; - if (cos->s.queue == NULL || cos->s.pool == NULL) + if (cos->s.queue == NULL || cos->s.pool == ODP_POOL_INVALID) return -EFAULT; - *pool = cos->s.pool->s.pool_hdl; + *pool = cos->s.pool; pkt_hdr->p.input_flags.dst_queue = 1; pkt_hdr->dst_queue = cos->s.queue->s.handle; diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c index c7431e6..8935b0d 100644 --- a/platform/linux-generic/odp_crypto.c +++ b/platform/linux-generic/odp_crypto.c @@ -40,7 +40,9 @@ static odp_crypto_global_t *global; static odp_crypto_generic_op_result_t *get_op_result_from_event(odp_event_t ev) { - return &(odp_packet_hdr(odp_packet_from_event(ev))->op_result); + odp_packet_hdr_t *hdr = odp_packet_hdr(odp_packet_from_event(ev)); + + return &hdr->op_result; } static diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index c4cf324..03769f6 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -47,7 +47,7 @@ void packet_parse_reset(odp_packet_hdr_t *pkt_hdr) /** * Initialize packet */ -static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr, +static void packet_init(pool_t *pool, odp_packet_hdr_t *pkt_hdr, size_t size, int parse) { pkt_hdr->p.input_flags.all = 0; @@ -68,10 +68,8 @@ static void packet_init(pool_entry_t *pool, odp_packet_hdr_t *pkt_hdr, * segment occupied by the allocated length. */ pkt_hdr->frame_len = size; - pkt_hdr->headroom = pool->s.headroom; - pkt_hdr->tailroom = - (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - - (pool->s.headroom + size); + pkt_hdr->headroom = pool->headroom; + pkt_hdr->tailroom = pool->data_size - size + pool->tailroom; pkt_hdr->input = ODP_PKTIO_INVALID; } @@ -80,10 +78,10 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int max_num) { odp_packet_hdr_t *pkt_hdr; - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); + pool_t *pool = odp_pool_to_entry(pool_hdl); int num, i; - num = buffer_alloc_multi(pool_hdl, len, (odp_buffer_t *)pkt, max_num); + num = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, max_num); for (i = 0; i < num; i++) { pkt_hdr = odp_packet_hdr(pkt[i]); @@ -98,18 +96,22 @@ int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) { - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); - size_t pkt_size = len ? len : pool->s.params.buf.size; + pool_t *pool = odp_pool_to_entry(pool_hdl); + size_t pkt_size = len ? len : pool->data_size; odp_packet_t pkt; odp_packet_hdr_t *pkt_hdr; + int ret; - if (pool->s.params.type != ODP_POOL_PACKET) { + if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; return ODP_PACKET_INVALID; } - pkt = (odp_packet_t)buffer_alloc(pool_hdl, pkt_size); - if (pkt == ODP_PACKET_INVALID) + if (odp_unlikely(len > pool->max_len)) + return ODP_PACKET_INVALID; + + ret = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)&pkt, 1); + if (ret != 1) return ODP_PACKET_INVALID; pkt_hdr = odp_packet_hdr(pkt); @@ -126,17 +128,19 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len) int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, odp_packet_t pkt[], int num) { - pool_entry_t *pool = odp_pool_to_entry(pool_hdl); - size_t pkt_size = len ? len : pool->s.params.buf.size; + pool_t *pool = odp_pool_to_entry(pool_hdl); + size_t pkt_size = len ? 
len : pool->data_size; int count, i; - if (pool->s.params.type != ODP_POOL_PACKET) { + if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) { __odp_errno = EINVAL; return -1; } - count = buffer_alloc_multi(pool_hdl, pkt_size, - (odp_buffer_t *)pkt, num); + if (odp_unlikely(len > pool->max_len)) + return -1; + + count = buffer_alloc_multi(pool_hdl, (odp_buffer_t *)pkt, num); for (i = 0; i < count; ++i) { odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt[i]); @@ -154,25 +158,20 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len, void odp_packet_free(odp_packet_t pkt) { - uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt); - - buffer_free(pool_id, (odp_buffer_t)pkt); + buffer_free_multi((odp_buffer_t *)&pkt, 1); } void odp_packet_free_multi(const odp_packet_t pkt[], int num) { - uint32_t pool_id = pool_id_from_buf((odp_buffer_t)pkt[0]); - - buffer_free_multi(pool_id, (const odp_buffer_t * const)pkt, num); + buffer_free_multi((const odp_buffer_t * const)pkt, num); } int odp_packet_reset(odp_packet_t pkt, uint32_t len) { odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); - pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr); - uint32_t totsize = pool->s.headroom + len + pool->s.tailroom; + pool_t *pool = odp_pool_to_entry(pkt_hdr->buf_hdr.pool_hdl); - if (totsize > pkt_hdr->buf_hdr.size) + if (len > pool->headroom + pool->data_size + pool->tailroom) return -1; packet_init(pool, pkt_hdr, len, 0); @@ -378,14 +377,8 @@ void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len, odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); void *addr = packet_map(pkt_hdr, offset, len); - if (addr != NULL && seg != NULL) { - odp_buffer_bits_t seghandle; - - seghandle.handle = (odp_buffer_t)pkt; - seghandle.seg = (pkt_hdr->headroom + offset) / - pkt_hdr->buf_hdr.segsize; - *seg = (odp_packet_seg_t)seghandle.handle; - } + if (addr != NULL && seg != NULL) + *seg = (odp_packet_seg_t)pkt; return addr; } @@ -578,20 +571,19 @@ odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt) odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - odp_buffer_bits_t seghandle; + (void)pkt; - seghandle.handle = (odp_buffer_t)pkt; - seghandle.seg = pkt_hdr->buf_hdr.segcount - 1; - return (odp_packet_seg_t)seghandle.handle; + /* Only one segment */ + return (odp_packet_seg_t)pkt; } odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + (void)pkt; + (void)seg; - return (odp_packet_seg_t)segment_next(&pkt_hdr->buf_hdr, - (odp_buffer_seg_t)seg); + /* Only one segment */ + return ODP_PACKET_SEG_INVALID; } /* @@ -603,21 +595,18 @@ odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt, odp_packet_seg_t seg) void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + (void)seg; - return segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, NULL, - pkt_hdr->frame_len, pkt_hdr->headroom); + /* Only one segment */ + return odp_packet_data(pkt); } uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg) { - odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); - uint32_t seglen = 0; + (void)seg; - segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg, &seglen, - pkt_hdr->frame_len, pkt_hdr->headroom); - - return seglen; + /* Only one segment */ + return odp_packet_seg_len(pkt); } /* @@ -957,9 +946,13 @@ void odp_packet_print(odp_packet_t pkt) int odp_packet_is_valid(odp_packet_t pkt) { - odp_buffer_hdr_t 
*buf = validate_buf((odp_buffer_t)pkt); + if (odp_buffer_is_valid((odp_buffer_t)pkt) == 0) + return 0; + + if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET) + return 0; - return (buf != NULL && buf->type == ODP_EVENT_PACKET); + return 1; } /* diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 415c9fa..1286753 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -4,77 +4,71 @@ * SPDX-License-Identifier: BSD-3-Clause */ -#include <odp/api/std_types.h> #include <odp/api/pool.h> -#include <odp_buffer_internal.h> -#include <odp_pool_internal.h> -#include <odp_buffer_inlines.h> -#include <odp_packet_internal.h> -#include <odp_timer_internal.h> -#include <odp_align_internal.h> #include <odp/api/shared_memory.h> #include <odp/api/align.h> +#include <odp/api/ticketlock.h> + +#include <odp_pool_internal.h> #include <odp_internal.h> +#include <odp_buffer_inlines.h> +#include <odp_packet_internal.h> #include <odp_config_internal.h> -#include <odp/api/hints.h> -#include <odp/api/thread.h> #include <odp_debug_internal.h> +#include <odp_ring_internal.h> #include <string.h> -#include <stdlib.h> +#include <stdio.h> #include <inttypes.h> -#if ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS -#error ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS -#endif - - -typedef union buffer_type_any_u { - odp_buffer_hdr_t buf; - odp_packet_hdr_t pkt; - odp_timeout_hdr_t tmo; -} odp_anybuf_t; +#include <odp/api/plat/ticketlock_inlines.h> +#define LOCK(a) _odp_ticketlock_lock(a) +#define UNLOCK(a) _odp_ticketlock_unlock(a) +#define LOCK_INIT(a) odp_ticketlock_init(a) -/* Any buffer type header */ -typedef struct { - union buffer_type_any_u any_hdr; /* any buffer type */ -} odp_any_buffer_hdr_t; - -typedef struct odp_any_hdr_stride { - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))]; -} odp_any_hdr_stride; +#define CACHE_BURST 32 +#define RING_SIZE_MIN (2 * CACHE_BURST) +ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST), + "cache_burst_size_too_large_compared_to_cache_size"); typedef struct pool_table_t { - pool_entry_t pool[ODP_CONFIG_POOLS]; + pool_t pool[ODP_CONFIG_POOLS]; + odp_shm_t shm; } pool_table_t; - -/* The pool table */ -static pool_table_t *pool_tbl; -static const char SHM_DEFAULT_NAME[] = "odp_buffer_pools"; - -/* Pool entry pointers (for inlining) */ -void *pool_entry_ptr[ODP_CONFIG_POOLS]; - /* Thread local variables */ typedef struct pool_local_t { - local_cache_t *cache[ODP_CONFIG_POOLS]; + pool_cache_t *cache[ODP_CONFIG_POOLS]; int thr_id; } pool_local_t; +static pool_table_t *pool_tbl; static __thread pool_local_t local; -static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool); +static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx) +{ + return _odp_cast_scalar(odp_pool_t, pool_idx); +} + +pool_t *pool_entry(uint32_t pool_idx) +{ + return &pool_tbl->pool[pool_idx]; +} + +static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl) +{ + return &pool_tbl->pool[_odp_typeval(pool_hdl)]; +} int odp_pool_init_global(void) { uint32_t i; odp_shm_t shm; - shm = odp_shm_reserve(SHM_DEFAULT_NAME, + shm = odp_shm_reserve("_odp_pool_table", sizeof(pool_table_t), - sizeof(pool_entry_t), 0); + ODP_CACHE_LINE_SIZE, 0); pool_tbl = odp_shm_addr(shm); @@ -82,1079 +76,766 @@ int odp_pool_init_global(void) return -1; memset(pool_tbl, 0, sizeof(pool_table_t)); + pool_tbl->shm = shm; for (i = 0; i < ODP_CONFIG_POOLS; i++) { - /* init locks */ - pool_entry_t *pool = &pool_tbl->pool[i]; - 
POOL_LOCK_INIT(&pool->s.lock); - POOL_LOCK_INIT(&pool->s.buf_lock); - POOL_LOCK_INIT(&pool->s.blk_lock); - pool->s.pool_hdl = pool_index_to_handle(i); - pool->s.pool_id = i; - pool_entry_ptr[i] = pool; - odp_atomic_init_u32(&pool->s.bufcount, 0); - odp_atomic_init_u32(&pool->s.blkcount, 0); - - /* Initialize pool statistics counters */ - odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0); - odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0); - odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0); - odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0); - odp_atomic_init_u64(&pool->s.poolstats.buf_high_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.buf_low_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.blk_high_wm_count, 0); - odp_atomic_init_u64(&pool->s.poolstats.blk_low_wm_count, 0); + pool_t *pool = pool_entry(i); + + LOCK_INIT(&pool->lock); + pool->pool_hdl = pool_index_to_handle(i); + pool->pool_idx = i; } ODP_DBG("\nPool init global\n"); - ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s)); - ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t)); ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t)); + ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t)); ODP_DBG("\n"); return 0; } -int odp_pool_init_local(void) -{ - pool_entry_t *pool; - int i; - int thr_id = odp_thread_id(); - - memset(&local, 0, sizeof(pool_local_t)); - - for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); - local.cache[i] = &pool->s.local_cache[thr_id]; - local.cache[i]->s.num_buf = 0; - } - - local.thr_id = thr_id; - return 0; -} - int odp_pool_term_global(void) { int i; - pool_entry_t *pool; + pool_t *pool; int ret = 0; int rc = 0; for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); + pool = pool_entry(i); - POOL_LOCK(&pool->s.lock); - if (pool->s.pool_shm != ODP_SHM_INVALID) { - ODP_ERR("Not destroyed pool: %s\n", pool->s.name); + LOCK(&pool->lock); + if (pool->reserved) { + ODP_ERR("Not destroyed pool: %s\n", pool->name); rc = -1; } - POOL_UNLOCK(&pool->s.lock); + UNLOCK(&pool->lock); } - ret = odp_shm_free(odp_shm_lookup(SHM_DEFAULT_NAME)); + ret = odp_shm_free(pool_tbl->shm); if (ret < 0) { - ODP_ERR("shm free failed for %s", SHM_DEFAULT_NAME); + ODP_ERR("shm free failed"); rc = -1; } return rc; } -int odp_pool_term_local(void) +int odp_pool_init_local(void) { + pool_t *pool; int i; + int thr_id = odp_thread_id(); - for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool_entry_t *pool = get_pool_entry(i); + memset(&local, 0, sizeof(pool_local_t)); - flush_cache(local.cache[i], &pool->s); + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool = pool_entry(i); + local.cache[i] = &pool->local_cache[thr_id]; + local.cache[i]->num = 0; } + local.thr_id = thr_id; return 0; } -int odp_pool_capability(odp_pool_capability_t *capa) +static void flush_cache(pool_cache_t *cache, pool_t *pool) { - memset(capa, 0, sizeof(odp_pool_capability_t)); + ring_t *ring; + uint32_t mask; + uint32_t cache_num, i, data; - capa->max_pools = ODP_CONFIG_POOLS; + ring = &pool->ring.hdr; + mask = pool->ring_mask; + cache_num = cache->num; - /* Buffer pools */ - capa->buf.max_pools = ODP_CONFIG_POOLS; - capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX; - capa->buf.max_size = 0; - capa->buf.max_num = 0; + for (i = 0; i < cache_num; i++) { + data = (uint32_t)(uintptr_t)cache->buf[i]; + ring_enq(ring, mask, data); + } - /* Packet pools */ - 
capa->pkt.max_pools = ODP_CONFIG_POOLS; - capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS * - ODP_CONFIG_PACKET_SEG_LEN_MIN; - capa->pkt.max_num = 0; - capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM; - capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM; - capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS; - capa->pkt.min_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MIN; - capa->pkt.max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX; - capa->pkt.max_uarea_size = 0; + cache->num = 0; +} - /* Timeout pools */ - capa->tmo.max_pools = ODP_CONFIG_POOLS; - capa->tmo.max_num = 0; +int odp_pool_term_local(void) +{ + int i; + + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool_t *pool = pool_entry(i); + + flush_cache(local.cache[i], pool); + } return 0; } -static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) +static pool_t *reserve_pool(void) { - odp_buffer_hdr_t *myhead; - - POOL_LOCK(&pool->buf_lock); - - myhead = pool->buf_freelist; + int i; + pool_t *pool; - if (odp_unlikely(myhead == NULL)) { - POOL_UNLOCK(&pool->buf_lock); - odp_atomic_inc_u64(&pool->poolstats.bufempty); - } else { - pool->buf_freelist = myhead->next; - POOL_UNLOCK(&pool->buf_lock); + for (i = 0; i < ODP_CONFIG_POOLS; i++) { + pool = pool_entry(i); - odp_atomic_fetch_sub_u32(&pool->bufcount, 1); - odp_atomic_inc_u64(&pool->poolstats.bufallocs); + LOCK(&pool->lock); + if (pool->reserved == 0) { + pool->reserved = 1; + UNLOCK(&pool->lock); + return pool; + } + UNLOCK(&pool->lock); } - return (void *)myhead; + return NULL; } -static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf) +static odp_buffer_t form_buffer_handle(uint32_t pool_idx, uint32_t buffer_idx) { - if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) { - while (buf->segcount > 0) { - if (buffer_is_secure(buf) || pool_is_secure(pool)) - memset(buf->addr[buf->segcount - 1], - 0, buf->segsize); - ret_blk(pool, buf->addr[--buf->segcount]); - } - buf->size = 0; - } + odp_buffer_bits_t bits; - buf->allocator = ODP_FREEBUF; /* Mark buffer free */ - POOL_LOCK(&pool->buf_lock); - buf->next = pool->buf_freelist; - pool->buf_freelist = buf; - POOL_UNLOCK(&pool->buf_lock); + bits.handle = 0; + bits.pool_id = pool_idx; + bits.index = buffer_idx; - odp_atomic_fetch_add_u32(&pool->bufcount, 1); - odp_atomic_inc_u64(&pool->poolstats.buffrees); + return bits.handle; } -/* - * Pool creation - */ -odp_pool_t _pool_create(const char *name, - odp_pool_param_t *params, - uint32_t shmflags) +static void init_buffers(pool_t *pool) { - odp_pool_t pool_hdl = ODP_POOL_INVALID; - pool_entry_t *pool; - uint32_t i, headroom = 0, tailroom = 0; - odp_shm_t shm; + uint32_t i; + odp_buffer_hdr_t *buf_hdr; + odp_packet_hdr_t *pkt_hdr; + odp_buffer_t buf_hdl; + void *addr; + void *uarea = NULL; + uint8_t *data; + uint32_t offset; + ring_t *ring; + uint32_t mask; + int type; + uint32_t size; + + ring = &pool->ring.hdr; + mask = pool->ring_mask; + type = pool->params.type; + + for (i = 0; i < pool->num; i++) { + addr = &pool->base_addr[i * pool->block_size]; + buf_hdr = addr; + pkt_hdr = addr; + + if (pool->uarea_size) + uarea = &pool->uarea_base_addr[i * pool->uarea_size]; + + data = buf_hdr->data; + + if (type == ODP_POOL_PACKET) + data = pkt_hdr->data; + + offset = pool->headroom; + + /* move to correct align */ + while (((uintptr_t)&data[offset]) % pool->align != 0) + offset++; + + memset(buf_hdr, 0, sizeof(odp_buffer_hdr_t)); + + size = pool->headroom + pool->data_size + pool->tailroom; + + /* Initialize buffer metadata */ + buf_hdr->size = size; + buf_hdr->type = 
type; + buf_hdr->event_type = type; + buf_hdr->pool_hdl = pool->pool_hdl; + buf_hdr->uarea_addr = uarea; + /* Show user requested size through API */ + buf_hdr->uarea_size = pool->params.pkt.uarea_size; + buf_hdr->segcount = 1; + buf_hdr->segsize = size; + + /* Pointer to data start (of the first segment) */ + buf_hdr->addr[0] = &data[offset]; + + buf_hdl = form_buffer_handle(pool->pool_idx, i); + buf_hdr->handle.handle = buf_hdl; + + /* Store buffer into the global pool */ + ring_enq(ring, mask, (uint32_t)(uintptr_t)buf_hdl); + } +} - if (params == NULL) +static odp_pool_t pool_create(const char *name, odp_pool_param_t *params, + uint32_t shmflags) +{ + pool_t *pool; + uint32_t uarea_size, headroom, tailroom; + odp_shm_t shm; + uint32_t data_size, align, num, hdr_size, block_size; + uint32_t max_len, max_seg_len; + uint32_t ring_size; + int name_len; + const char *postfix = "_uarea"; + char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)]; + + if (params == NULL) { + ODP_ERR("No params"); return ODP_POOL_INVALID; - - /* Default size and align for timeouts */ - if (params->type == ODP_POOL_TIMEOUT) { - params->buf.size = 0; /* tmo.__res1 */ - params->buf.align = 0; /* tmo.__res2 */ } - /* Default initialization parameters */ - uint32_t p_udata_size = 0; - uint32_t udata_stride = 0; + align = 0; - /* Restriction for v1.0: All non-packet buffers are unsegmented */ - int unseg = 1; + if (params->type == ODP_POOL_BUFFER) + align = params->buf.align; - uint32_t blk_size, buf_stride, buf_num, blk_num, seg_len = 0; - uint32_t buf_align = - params->type == ODP_POOL_BUFFER ? params->buf.align : 0; + if (align < ODP_CONFIG_BUFFER_ALIGN_MIN) + align = ODP_CONFIG_BUFFER_ALIGN_MIN; /* Validate requested buffer alignment */ - if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX || - buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align)) + if (align > ODP_CONFIG_BUFFER_ALIGN_MAX || + align != ODP_ALIGN_ROUNDDOWN_POWER_2(align, align)) { + ODP_ERR("Bad align requirement"); return ODP_POOL_INVALID; + } - /* Set correct alignment based on input request */ - if (buf_align == 0) - buf_align = ODP_CACHE_LINE_SIZE; - else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN) - buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN; + headroom = 0; + tailroom = 0; + data_size = 0; + max_len = 0; + max_seg_len = 0; + uarea_size = 0; - /* Calculate space needed for buffer blocks and metadata */ switch (params->type) { case ODP_POOL_BUFFER: - buf_num = params->buf.num; - blk_size = params->buf.size; - - /* Optimize small raw buffers */ - if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0) - blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align); - - buf_stride = sizeof(odp_buffer_hdr_stride); + num = params->buf.num; + data_size = params->buf.size; break; case ODP_POOL_PACKET: - unseg = 0; /* Packets are always segmented */ - headroom = ODP_CONFIG_PACKET_HEADROOM; - tailroom = ODP_CONFIG_PACKET_TAILROOM; - buf_num = params->pkt.num; - - seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ? - ODP_CONFIG_PACKET_SEG_LEN_MIN : - (params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ? - params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX); - - seg_len = ODP_ALIGN_ROUNDUP( - headroom + seg_len + tailroom, - ODP_CONFIG_BUFFER_ALIGN_MIN); - - blk_size = params->pkt.len <= seg_len ? 
seg_len : - ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len); - - /* Reject create if pkt.len needs too many segments */ - if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) { - ODP_ERR("ODP_BUFFER_MAX_SEG exceed %d(%d)\n", - blk_size / seg_len, ODP_BUFFER_MAX_SEG); + headroom = ODP_CONFIG_PACKET_HEADROOM; + tailroom = ODP_CONFIG_PACKET_TAILROOM; + num = params->pkt.num; + uarea_size = params->pkt.uarea_size; + + data_size = ODP_CONFIG_PACKET_SEG_LEN_MAX; + + if (data_size < ODP_CONFIG_PACKET_SEG_LEN_MIN) + data_size = ODP_CONFIG_PACKET_SEG_LEN_MIN; + + if (data_size > ODP_CONFIG_PACKET_SEG_LEN_MAX) { + ODP_ERR("Too large seg len requirement"); return ODP_POOL_INVALID; } - p_udata_size = params->pkt.uarea_size; - udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size, - sizeof(uint64_t)); - - buf_stride = sizeof(odp_packet_hdr_stride); + max_seg_len = ODP_CONFIG_PACKET_SEG_LEN_MAX - + ODP_CONFIG_PACKET_HEADROOM - + ODP_CONFIG_PACKET_TAILROOM; + max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_seg_len; break; case ODP_POOL_TIMEOUT: - blk_size = 0; - buf_num = params->tmo.num; - buf_stride = sizeof(odp_timeout_hdr_stride); + num = params->tmo.num; break; default: + ODP_ERR("Bad pool type"); return ODP_POOL_INVALID; } - /* Validate requested number of buffers against addressable limits */ - if (buf_num > - (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) { - ODP_ERR("buf_num %d > then expected %d\n", - buf_num, ODP_BUFFER_MAX_BUFFERS / - (buf_stride / ODP_CACHE_LINE_SIZE)); + if (uarea_size) + uarea_size = ODP_CACHE_LINE_SIZE_ROUNDUP(uarea_size); + + pool = reserve_pool(); + + if (pool == NULL) { + ODP_ERR("No more free pools"); return ODP_POOL_INVALID; } - /* Find an unused buffer pool slot and iniitalize it as requested */ - for (i = 0; i < ODP_CONFIG_POOLS; i++) { - pool = get_pool_entry(i); + if (name == NULL) { + pool->name[0] = 0; + } else { + strncpy(pool->name, name, + ODP_POOL_NAME_LEN - 1); + pool->name[ODP_POOL_NAME_LEN - 1] = 0; + } - POOL_LOCK(&pool->s.lock); - if (pool->s.pool_shm != ODP_SHM_INVALID) { - POOL_UNLOCK(&pool->s.lock); - continue; - } + name_len = strlen(pool->name); + memcpy(uarea_name, pool->name, name_len); + strcpy(&uarea_name[name_len], postfix); - /* found free pool */ - size_t block_size, pad_size, mdata_size, udata_size; + pool->params = *params; - pool->s.flags.all = 0; + hdr_size = sizeof(odp_packet_hdr_t); + hdr_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size); - if (name == NULL) { - pool->s.name[0] = 0; - } else { - strncpy(pool->s.name, name, - ODP_POOL_NAME_LEN - 1); - pool->s.name[ODP_POOL_NAME_LEN - 1] = 0; - pool->s.flags.has_name = 1; - } + block_size = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size + align + headroom + + data_size + tailroom); - pool->s.params = *params; - pool->s.buf_align = buf_align; + if (num <= RING_SIZE_MIN) + ring_size = RING_SIZE_MIN; + else + ring_size = ODP_ROUNDUP_POWER_2(num); - /* Optimize for short buffers: Data stored in buffer hdr */ - if (blk_size <= ODP_MAX_INLINE_BUF) { - block_size = 0; - pool->s.buf_align = blk_size == 0 ? 
0 : sizeof(void *);
-	} else {
-		block_size = buf_num * blk_size;
-		pool->s.buf_align = buf_align;
+	pool->ring_mask = ring_size - 1;
+	pool->num = num;
+	pool->align = align;
+	pool->headroom = headroom;
+	pool->data_size = data_size;
+	pool->max_len = max_len;
+	pool->max_seg_len = max_seg_len;
+	pool->tailroom = tailroom;
+	pool->block_size = block_size;
+	pool->uarea_size = uarea_size;
+	pool->shm_size = num * block_size;
+	pool->uarea_shm_size = num * uarea_size;
-	pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
-	mdata_size = buf_num * buf_stride;
-	udata_size = buf_num * udata_stride;
+	shm = odp_shm_reserve(pool->name, pool->shm_size,
+			      ODP_PAGE_SIZE, shmflags);
-	pool->s.buf_num = buf_num;
-	pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
-						  pad_size +
-						  mdata_size +
-						  udata_size);
+	pool->shm = shm;
-	shm = odp_shm_reserve(pool->s.name,
-			      pool->s.pool_size,
-			      ODP_PAGE_SIZE, shmflags);
-	if (shm == ODP_SHM_INVALID) {
-		POOL_UNLOCK(&pool->s.lock);
-		return ODP_POOL_INVALID;
-	}
-	pool->s.pool_base_addr = odp_shm_addr(shm);
-	pool->s.pool_shm = shm;
-
-	/* Now safe to unlock since pool entry has been allocated */
-	POOL_UNLOCK(&pool->s.lock);
-
-	pool->s.flags.unsegmented = unseg;
-	pool->s.seg_size = unseg ? blk_size : seg_len;
-	pool->s.blk_size = blk_size;
-
-	uint8_t *block_base_addr = pool->s.pool_base_addr;
-	uint8_t *mdata_base_addr =
-		block_base_addr + block_size + pad_size;
-	uint8_t *udata_base_addr = mdata_base_addr + mdata_size;
-
-	/* Pool mdata addr is used for indexing buffer metadata */
-	pool->s.pool_mdata_addr = mdata_base_addr;
-	pool->s.udata_size = p_udata_size;
-
-	pool->s.buf_stride = buf_stride;
-	pool->s.buf_freelist = NULL;
-	pool->s.blk_freelist = NULL;
-
-	/* Initialization will increment these to their target vals */
-	odp_atomic_store_u32(&pool->s.bufcount, 0);
-	odp_atomic_store_u32(&pool->s.blkcount, 0);
-
-	uint8_t *buf = udata_base_addr - buf_stride;
-	uint8_t *udat = udata_stride == 0 ? NULL :
-		udata_base_addr + udata_size - udata_stride;
-
-	/* Init buffer common header and add to pool buffer freelist */
-	do {
-		odp_buffer_hdr_t *tmp =
-			(odp_buffer_hdr_t *)(void *)buf;
-
-		/* Iniitalize buffer metadata */
-		tmp->allocator = ODP_FREEBUF;
-		tmp->flags.all = 0;
-		tmp->size = 0;
-		tmp->type = params->type;
-		tmp->event_type = params->type;
-		tmp->pool_hdl = pool->s.pool_hdl;
-		tmp->uarea_addr = (void *)udat;
-		tmp->uarea_size = p_udata_size;
-		tmp->segcount = 0;
-		tmp->segsize = pool->s.seg_size;
-		tmp->handle.handle = odp_buffer_encode_handle(tmp);
-
-		/* Set 1st seg addr for zero-len buffers */
-		tmp->addr[0] = NULL;
-
-		/* Special case for short buffer data */
-		if (blk_size <= ODP_MAX_INLINE_BUF) {
-			tmp->flags.hdrdata = 1;
-			if (blk_size > 0) {
-				tmp->segcount = 1;
-				tmp->addr[0] = &tmp->addr[1];
-				tmp->size = blk_size;
-			}
-		}
-
-		/* Push buffer onto pool's freelist */
-		ret_buf(&pool->s, tmp);
-		buf -= buf_stride;
-		udat -= udata_stride;
-	} while (buf >= mdata_base_addr);
-
-	/* Form block freelist for pool */
-	uint8_t *blk =
-		block_base_addr + block_size - pool->s.seg_size;
-
-	if (blk_size > ODP_MAX_INLINE_BUF)
-		do {
-			ret_blk(&pool->s, blk);
-			blk -= pool->s.seg_size;
-		} while (blk >= block_base_addr);
-
-	blk_num = odp_atomic_load_u32(&pool->s.blkcount);
-
-	/* Initialize pool statistics counters */
-	odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.buf_high_wm_count, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.buf_low_wm_count, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.blk_high_wm_count, 0);
-	odp_atomic_store_u64(&pool->s.poolstats.blk_low_wm_count, 0);
-
-	/* Reset other pool globals to initial state */
-	pool->s.buf_low_wm_assert = 0;
-	pool->s.blk_low_wm_assert = 0;
-	pool->s.quiesced = 0;
-	pool->s.headroom = headroom;
-	pool->s.tailroom = tailroom;
-
-	/* Watermarks are hard-coded for now to control caching */
-	pool->s.buf_high_wm = buf_num / 2;
-	pool->s.buf_low_wm = buf_num / 4;
-	pool->s.blk_high_wm = blk_num / 2;
-	pool->s.blk_low_wm = blk_num / 4;
-
-	pool_hdl = pool->s.pool_hdl;
-	break;
+	if (shm == ODP_SHM_INVALID) {
+		ODP_ERR("Shm reserve failed");
+		goto error;
 	}
-	return pool_hdl;
-}
+	pool->base_addr = odp_shm_addr(pool->shm);
-odp_pool_t odp_pool_create(const char *name,
-			   odp_pool_param_t *params)
-{
-#ifdef _ODP_PKTIO_IPC
-	if (params && (params->type == ODP_POOL_PACKET))
-		return _pool_create(name, params, ODP_SHM_PROC);
-#endif
-	return _pool_create(name, params, 0);
-
-}
-
-odp_pool_t odp_pool_lookup(const char *name)
-{
-	uint32_t i;
-	pool_entry_t *pool;
+	pool->uarea_shm = ODP_SHM_INVALID;
+	if (uarea_size) {
+		shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size,
+				      ODP_PAGE_SIZE, shmflags);
-	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
-		pool = get_pool_entry(i);
+		pool->uarea_shm = shm;
-		POOL_LOCK(&pool->s.lock);
-		if (strcmp(name, pool->s.name) == 0) {
-			/* found it */
-			POOL_UNLOCK(&pool->s.lock);
-			return pool->s.pool_hdl;
+		if (shm == ODP_SHM_INVALID) {
+			ODP_ERR("Shm reserve failed (uarea)");
+			goto error;
 		}
-		POOL_UNLOCK(&pool->s.lock);
+
+		pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
 	}
-	return ODP_POOL_INVALID;
-}
+	ring_init(&pool->ring.hdr);
+	init_buffers(pool);
-int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
-{
-	uint32_t pool_id = pool_handle_to_index(pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
+	return pool->pool_hdl;
-	if (pool == NULL || info == NULL)
-		return -1;
+error:
+	if (pool->shm != ODP_SHM_INVALID)
+		odp_shm_free(pool->shm);
-	info->name = pool->s.name;
-	info->params = pool->s.params;
+	if (pool->uarea_shm != ODP_SHM_INVALID)
+		odp_shm_free(pool->uarea_shm);
-	return 0;
+	LOCK(&pool->lock);
+	pool->reserved = 0;
+	UNLOCK(&pool->lock);
+	return ODP_POOL_INVALID;
 }
-static inline void get_local_cache_bufs(local_cache_t *buf_cache, uint32_t idx,
-					odp_buffer_hdr_t *buf_hdr[],
-					uint32_t num)
-{
-	uint32_t i;
-	for (i = 0; i < num; i++) {
-		buf_hdr[i] = buf_cache->s.buf[idx + i];
-		odp_prefetch(buf_hdr[i]);
-		odp_prefetch_store(buf_hdr[i]);
-	}
-}
-
-static void flush_cache(local_cache_t *buf_cache, struct pool_entry_s *pool)
+odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
 {
-	uint32_t flush_count = 0;
-	uint32_t num;
-
-	while ((num = buf_cache->s.num_buf)) {
-		odp_buffer_hdr_t *buf;
-
-		buf = buf_cache->s.buf[num - 1];
-		ret_buf(pool, buf);
-		flush_count++;
-		buf_cache->s.num_buf--;
-	}
-
-	odp_atomic_add_u64(&pool->poolstats.bufallocs, buf_cache->s.bufallocs);
-	odp_atomic_add_u64(&pool->poolstats.buffrees,
-			   buf_cache->s.buffrees - flush_count);
-
-	buf_cache->s.bufallocs = 0;
-	buf_cache->s.buffrees = 0;
+#ifdef _ODP_PKTIO_IPC
+	if (params && (params->type == ODP_POOL_PACKET))
+		return pool_create(name, params, ODP_SHM_PROC);
+#endif
+	return pool_create(name, params, 0);
 }
 
 int odp_pool_destroy(odp_pool_t pool_hdl)
 {
-	uint32_t pool_id = pool_handle_to_index(pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
+	pool_t *pool = pool_entry_from_hdl(pool_hdl);
 	int i;
 
 	if (pool == NULL)
 		return -1;
 
-	POOL_LOCK(&pool->s.lock);
+	LOCK(&pool->lock);
 
-	/* Call fails if pool is not allocated or predefined*/
-	if (pool->s.pool_shm == ODP_SHM_INVALID ||
-	    pool->s.flags.predefined) {
-		POOL_UNLOCK(&pool->s.lock);
-		ODP_ERR("invalid shm for pool %s\n", pool->s.name);
+	if (pool->reserved == 0) {
+		UNLOCK(&pool->lock);
+		ODP_ERR("Pool not created\n");
 		return -1;
 	}
 
 	/* Make sure local caches are empty */
 	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
-		flush_cache(&pool->s.local_cache[i], &pool->s);
-
-	/* Call fails if pool has allocated buffers */
-	if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.buf_num) {
-		POOL_UNLOCK(&pool->s.lock);
-		ODP_DBG("error: pool has allocated buffers %d/%d\n",
-			odp_atomic_load_u32(&pool->s.bufcount),
-			pool->s.buf_num);
-		return -1;
-	}
+		flush_cache(&pool->local_cache[i], pool);
 
-	odp_shm_free(pool->s.pool_shm);
-	pool->s.pool_shm = ODP_SHM_INVALID;
-	POOL_UNLOCK(&pool->s.lock);
+	odp_shm_free(pool->shm);
+
+	if (pool->uarea_shm != ODP_SHM_INVALID)
+		odp_shm_free(pool->uarea_shm);
+
+	pool->reserved = 0;
+	UNLOCK(&pool->lock);
 
 	return 0;
 }
 
-int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount)
+odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
 {
-	uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	void *newsegs[segcount];
-	int i;
+	odp_buffer_bits_t handle;
+	uint32_t pool_id, index, block_offset;
+	pool_t *pool;
+	odp_buffer_hdr_t *buf_hdr;
 
-	for (i = 0; i < segcount; i++) {
-		newsegs[i] = get_blk(&pool->s);
-		if (newsegs[i] == NULL) {
-			while (--i >= 0)
-				ret_blk(&pool->s, newsegs[i]);
-			return -1;
-		}
-	}
+	handle.handle = buf;
+	pool_id = handle.pool_id;
+	index = handle.index;
+	pool = pool_entry(pool_id);
+	block_offset = index * pool->block_size;
 
-	for (i = buf_hdr->segcount - 1; i >= 0; i--)
-		buf_hdr->addr[i + segcount] = buf_hdr->addr[i];
+	/* clang requires cast to uintptr_t */
+	buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
 
-	for (i = 0; i < segcount; i++)
-		buf_hdr->addr[i] = newsegs[i];
+	return buf_hdr;
+}
 
-	buf_hdr->segcount += segcount;
-	buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
-	return 0;
+odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
+{
+	return odp_buf_to_hdr(buf)->event_type;
 }
 
-void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount)
+void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
 {
-	uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	int s_cnt = buf_hdr->segcount;
-	int i;
+	odp_buf_to_hdr(buf)->event_type = ev;
+}
 
-	for (i = 0; i < segcount; i++)
-		ret_blk(&pool->s, buf_hdr->addr[i]);
+void *buffer_map(odp_buffer_hdr_t *buf,
+		 uint32_t offset,
+		 uint32_t *seglen,
+		 uint32_t limit)
+{
+	int seg_index;
+	int seg_offset;
 
-	for (i = 0; i < s_cnt - segcount; i++)
-		buf_hdr->addr[i] = buf_hdr->addr[i + segcount];
+	if (odp_likely(offset < buf->segsize)) {
+		seg_index = 0;
+		seg_offset = offset;
+	} else {
+		ODP_ERR("\nSEGMENTS NOT SUPPORTED\n");
+		return NULL;
+	}
 
-	buf_hdr->segcount -= segcount;
-	buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+	if (seglen != NULL) {
+		uint32_t buf_left = limit - offset;
+		*seglen = seg_offset + buf_left <= buf->segsize ?
+			buf_left : buf->segsize - seg_offset;
+	}
+
+	return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]);
 }
 
-int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+odp_pool_t odp_pool_lookup(const char *name)
 {
-	uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	uint32_t s_cnt = buf_hdr->segcount;
-	int i;
+	uint32_t i;
+	pool_t *pool;
 
-	for (i = 0; i < segcount; i++) {
-		buf_hdr->addr[s_cnt + i] = get_blk(&pool->s);
-		if (buf_hdr->addr[s_cnt + i] == NULL) {
-			while (--i >= 0)
-				ret_blk(&pool->s, buf_hdr->addr[s_cnt + i]);
-			return -1;
+	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+		pool = pool_entry(i);
+
+		LOCK(&pool->lock);
+		if (strcmp(name, pool->name) == 0) {
+			/* found it */
+			UNLOCK(&pool->lock);
+			return pool->pool_hdl;
 		}
+		UNLOCK(&pool->lock);
 	}
 
-	buf_hdr->segcount += segcount;
-	buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
-	return 0;
+	return ODP_POOL_INVALID;
 }
 
-void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
 {
-	uint32_t pool_id = pool_handle_to_index(buf_hdr->pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	int s_cnt = buf_hdr->segcount;
-	int i;
+	pool_t *pool = pool_entry_from_hdl(pool_hdl);
 
-	for (i = s_cnt - 1; i >= s_cnt - segcount; i--)
-		ret_blk(&pool->s, buf_hdr->addr[i]);
+	if (pool == NULL || info == NULL)
+		return -1;
 
-	buf_hdr->segcount -= segcount;
-	buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+	info->name = pool->name;
+	info->params = pool->params;
+
+	return 0;
 }
 
-static inline int get_local_bufs(local_cache_t *buf_cache,
-				 odp_buffer_hdr_t *buf_hdr[], uint32_t max_num)
+int buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int max_num)
 {
-	uint32_t num_buf = buf_cache->s.num_buf;
-	uint32_t num = num_buf;
+	pool_t *pool;
+	ring_t *ring;
+	uint32_t mask;
+	int i;
+	pool_cache_t *cache;
+	uint32_t cache_num;
 
-	if (odp_unlikely(num_buf == 0))
-		return 0;
+	pool = pool_entry_from_hdl(pool_hdl);
+	ring = &pool->ring.hdr;
+	mask = pool->ring_mask;
+	cache = local.cache[_odp_typeval(pool_hdl)];
 
-	if (odp_likely(max_num < num))
-		num = max_num;
+	cache_num = cache->num;
 
-	get_local_cache_bufs(buf_cache, num_buf - num, buf_hdr, num);
-	buf_cache->s.num_buf -= num;
-	buf_cache->s.bufallocs += num;
+	if (odp_likely((int)cache_num >= max_num)) {
+		for (i = 0; i < max_num; i++)
+			buf[i] = cache->buf[cache_num - max_num + i];
 
-	return num;
-}
+		cache->num = cache_num - max_num;
+		return max_num;
+	}
 
-static inline void ret_local_buf(local_cache_t *buf_cache, uint32_t idx,
-				 odp_buffer_hdr_t *buf)
-{
-	buf_cache->s.buf[idx] = buf;
-	buf_cache->s.num_buf++;
-	buf_cache->s.buffrees++;
-}
+	for (i = 0; i < max_num; i++) {
+		uint32_t data;
 
-static inline void ret_local_bufs(local_cache_t *buf_cache, uint32_t idx,
-				  odp_buffer_hdr_t *buf[], int num_buf)
-{
-	int i;
+		data = ring_deq(ring, mask);
+
+		if (data == RING_EMPTY)
+			break;
 
-	for (i = 0; i < num_buf; i++)
-		buf_cache->s.buf[idx + i] = buf[i];
+		buf[i] = (odp_buffer_t)(uintptr_t)data;
+	}
 
-	buf_cache->s.num_buf += num_buf;
-	buf_cache->s.buffrees += num_buf;
+	return i;
 }
 
-int buffer_alloc_multi(odp_pool_t pool_hdl, size_t size,
-		       odp_buffer_t buf[], int max_num)
+static inline void buffer_free_to_pool(uint32_t pool_id,
+				       const odp_buffer_t buf[], int num)
 {
-	uint32_t pool_id = pool_handle_to_index(pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
-	odp_buffer_hdr_t *buf_tbl[max_num];
-	odp_buffer_hdr_t *buf_hdr;
-	int num, i;
-	intmax_t needed;
-	void *blk;
-
-	/* Reject oversized allocation requests */
-	if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
-	    (!pool->s.flags.unsegmented &&
-	     totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG))
-		return 0;
+	pool_t *pool;
+	int i;
+	ring_t *ring;
+	uint32_t mask;
+	pool_cache_t *cache;
+	uint32_t cache_num;
+
+	cache = local.cache[pool_id];
+	pool = pool_entry(pool_id);
+	ring = &pool->ring.hdr;
+	mask = pool->ring_mask;
+
+	/* Special case of a very large free. Move directly to
+	 * the global pool. */
+	if (odp_unlikely(num > CONFIG_POOL_CACHE_SIZE)) {
+		for (i = 0; i < num; i++)
+			ring_enq(ring, mask, (uint32_t)(uintptr_t)buf[i]);
 
-	/* Try to satisfy request from the local cache */
-	num = get_local_bufs(local.cache[pool_id], buf_tbl, max_num);
-
-	/* If cache is empty, satisfy request from the pool */
-	if (odp_unlikely(num < max_num)) {
-		for (; num < max_num; num++) {
-			buf_hdr = get_buf(&pool->s);
-
-			if (odp_unlikely(buf_hdr == NULL))
-				goto pool_empty;
-
-			/* Get blocks for this buffer, if pool uses
-			 * application data */
-			if (buf_hdr->size < totsize) {
-				uint32_t segcount;
-
-				needed = totsize - buf_hdr->size;
-				do {
-					blk = get_blk(&pool->s);
-					if (odp_unlikely(blk == NULL)) {
-						ret_buf(&pool->s, buf_hdr);
-						goto pool_empty;
-					}
-
-					segcount = buf_hdr->segcount++;
-					buf_hdr->addr[segcount] = blk;
-					needed -= pool->s.seg_size;
-				} while (needed > 0);
-				buf_hdr->size = buf_hdr->segcount *
-					pool->s.seg_size;
-			}
-
-			buf_tbl[num] = buf_hdr;
-		}
+		return;
 	}
 
-pool_empty:
-	for (i = 0; i < num; i++) {
-		buf_hdr = buf_tbl[i];
-
-		/* Mark buffer as allocated */
-		buf_hdr->allocator = local.thr_id;
-
-		/* By default, buffers are not associated with
-		 * an ordered queue */
-		buf_hdr->origin_qe = NULL;
+	/* Make room into local cache if needed. Do at least burst size
+	 * transfer. */
+	cache_num = cache->num;
 
-		buf[i] = odp_hdr_to_buf(buf_hdr);
+	if (odp_unlikely((int)(CONFIG_POOL_CACHE_SIZE - cache_num) < num)) {
+		int burst = CACHE_BURST;
 
-		/* Add more segments if buffer from local cache is too small */
-		if (odp_unlikely(buf_hdr->size < totsize)) {
-			needed = totsize - buf_hdr->size;
-			do {
-				blk = get_blk(&pool->s);
-				if (odp_unlikely(blk == NULL)) {
-					int j;
+		if (odp_unlikely(num > CACHE_BURST))
+			burst = num;
 
-					ret_buf(&pool->s, buf_hdr);
-					buf_hdr = NULL;
-					local.cache[pool_id]->s.buffrees--;
+		for (i = 0; i < burst; i++) {
+			uint32_t data, index;
 
-					/* move remaining bufs up one step
-					 * and update loop counters */
-					num--;
-					for (j = i; j < num; j++)
-						buf_tbl[j] = buf_tbl[j + 1];
-
-					i--;
-					break;
-				}
-				needed -= pool->s.seg_size;
-				buf_hdr->addr[buf_hdr->segcount++] = blk;
-				buf_hdr->size = buf_hdr->segcount *
-					pool->s.seg_size;
-			} while (needed > 0);
+			index = cache_num - burst + i;
+			data = (uint32_t)(uintptr_t)cache->buf[index];
+			ring_enq(ring, mask, data);
 		}
+
+		cache_num -= burst;
 	}
 
-	return num;
+	for (i = 0; i < num; i++)
+		cache->buf[cache_num + i] = buf[i];
+
+	cache->num = cache_num + num;
 }
 
-odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size)
+void buffer_free_multi(const odp_buffer_t buf[], int num_total)
 {
-	uint32_t pool_id = pool_handle_to_index(pool_hdl);
-	pool_entry_t *pool = get_pool_entry(pool_id);
-	uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
-	odp_buffer_hdr_t *buf_hdr;
-	intmax_t needed;
-	void *blk;
+	uint32_t pool_id;
+	int num;
+	int i;
+	int first = 0;
 
-	/* Reject oversized allocation requests */
-	if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
-	    (!pool->s.flags.unsegmented &&
-	     totsize > pool->s.seg_size * ODP_BUFFER_MAX_SEG))
-		return 0;
+	while (1) {
+		num = 1;
+		i = 1;
+		pool_id = pool_id_from_buf(buf[first]);
 
-	/* Try to satisfy request from the local cache. If cache is empty,
-	 * satisfy request from the pool */
-	if (odp_unlikely(!get_local_bufs(local.cache[pool_id], &buf_hdr, 1))) {
-		buf_hdr = get_buf(&pool->s);
-
-		if (odp_unlikely(buf_hdr == NULL))
-			return ODP_BUFFER_INVALID;
-
-		/* Get blocks for this buffer, if pool uses application data */
-		if (buf_hdr->size < totsize) {
-			needed = totsize - buf_hdr->size;
-			do {
-				blk = get_blk(&pool->s);
-				if (odp_unlikely(blk == NULL)) {
-					ret_buf(&pool->s, buf_hdr);
-					return ODP_BUFFER_INVALID;
-				}
-				buf_hdr->addr[buf_hdr->segcount++] = blk;
-				needed -= pool->s.seg_size;
-			} while (needed > 0);
-			buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
+		/* 'num' buffers are from the same pool */
+		if (num_total > 1) {
+			for (i = first; i < num_total; i++)
+				if (pool_id != pool_id_from_buf(buf[i]))
+					break;
+
+			num = i - first;
 		}
-	}
-	/* Mark buffer as allocated */
-	buf_hdr->allocator = local.thr_id;
-
-	/* By default, buffers are not associated with
-	 * an ordered queue */
-	buf_hdr->origin_qe = NULL;
-
-	/* Add more segments if buffer from local cache is too small */
-	if (odp_unlikely(buf_hdr->size < totsize)) {
-		needed = totsize - buf_hdr->size;
-		do {
-			blk = get_blk(&pool->s);
-			if (odp_unlikely(blk == NULL)) {
-				ret_buf(&pool->s, buf_hdr);
-				buf_hdr = NULL;
-				local.cache[pool_id]->s.buffrees--;
-				return ODP_BUFFER_INVALID;
-			}
-			buf_hdr->addr[buf_hdr->segcount++] = blk;
-			needed -= pool->s.seg_size;
-		} while (needed > 0);
-		buf_hdr->size = buf_hdr->segcount * pool->s.seg_size;
-	}
-	return odp_hdr_to_buf(buf_hdr);
+		buffer_free_to_pool(pool_id, &buf[first], num);
+
+		if (i == num_total)
+			return;
+
+		first = i;
+	}
 }
 
 odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
 {
-	return buffer_alloc(pool_hdl,
-			    odp_pool_to_entry(pool_hdl)->s.params.buf.size);
+	odp_buffer_t buf;
+	int ret;
+
+	ret = buffer_alloc_multi(pool_hdl, &buf, 1);
+
+	if (odp_likely(ret == 1))
+		return buf;
+
+	return ODP_BUFFER_INVALID;
 }
 
 int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
 {
-	size_t buf_size = odp_pool_to_entry(pool_hdl)->s.params.buf.size;
-
-	return buffer_alloc_multi(pool_hdl, buf_size, buf, num);
+	return buffer_alloc_multi(pool_hdl, buf, num);
 }
 
-static void multi_pool_free(odp_buffer_hdr_t *buf_hdr[], int num_buf)
+void odp_buffer_free(odp_buffer_t buf)
 {
-	uint32_t pool_id, num;
-	local_cache_t *buf_cache;
-	pool_entry_t *pool;
-	int i, j, idx;
-
-	for (i = 0; i < num_buf; i++) {
-		pool_id = pool_handle_to_index(buf_hdr[i]->pool_hdl);
-		buf_cache = local.cache[pool_id];
-		num = buf_cache->s.num_buf;
-
-		if (num < POOL_MAX_LOCAL_BUFS) {
-			ret_local_buf(buf_cache, num, buf_hdr[i]);
-			continue;
-		}
-
-		idx = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE;
-		pool = get_pool_entry(pool_id);
-
-		/* local cache full, return a chunk */
-		for (j = 0; j < POOL_CHUNK_SIZE; j++) {
-			odp_buffer_hdr_t *tmp;
-
-			tmp = buf_cache->s.buf[idx + i];
-			ret_buf(&pool->s, tmp);
-		}
-
-		num = POOL_MAX_LOCAL_BUFS - POOL_CHUNK_SIZE;
-		buf_cache->s.num_buf = num;
-		ret_local_buf(buf_cache, num, buf_hdr[i]);
-	}
+	buffer_free_multi(&buf, 1);
 }
 
-void buffer_free_multi(uint32_t pool_id,
-		       const odp_buffer_t buf[], int num_free)
+void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
 {
-	local_cache_t *buf_cache = local.cache[pool_id];
-	uint32_t num;
-	int i, idx;
-	pool_entry_t *pool;
-	odp_buffer_hdr_t *buf_hdr[num_free];
-	int multi_pool = 0;
-
-	for (i = 0; i < num_free; i++) {
-		uint32_t id;
-
-		buf_hdr[i] = odp_buf_to_hdr(buf[i]);
-		ODP_ASSERT(buf_hdr[i]->allocator != ODP_FREEBUF);
-		buf_hdr[i]->allocator = ODP_FREEBUF;
-		id = pool_handle_to_index(buf_hdr[i]->pool_hdl);
-		multi_pool |= (pool_id != id);
-	}
-
-	if (odp_unlikely(multi_pool)) {
-		multi_pool_free(buf_hdr, num_free);
-		return;
-	}
+	buffer_free_multi(buf, num);
+}
 
-	num = buf_cache->s.num_buf;
+int odp_pool_capability(odp_pool_capability_t *capa)
+{
+	uint32_t max_len = ODP_CONFIG_PACKET_SEG_LEN_MAX -
+			   ODP_CONFIG_PACKET_HEADROOM -
+			   ODP_CONFIG_PACKET_TAILROOM;
 
-	if (odp_likely((num + num_free) < POOL_MAX_LOCAL_BUFS)) {
-		ret_local_bufs(buf_cache, num, buf_hdr, num_free);
-		return;
-	}
+	memset(capa, 0, sizeof(odp_pool_capability_t));
 
-	pool = get_pool_entry(pool_id);
+	capa->max_pools = ODP_CONFIG_POOLS;
 
-	/* Return at least one chunk into the global pool */
-	if (odp_unlikely(num_free > POOL_CHUNK_SIZE)) {
-		for (i = 0; i < num_free; i++)
-			ret_buf(&pool->s, buf_hdr[i]);
+	/* Buffer pools */
+	capa->buf.max_pools = ODP_CONFIG_POOLS;
+	capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+	capa->buf.max_size = 0;
+	capa->buf.max_num = CONFIG_POOL_MAX_NUM;
 
-		return;
-	}
+	/* Packet pools */
+	capa->pkt.max_pools = ODP_CONFIG_POOLS;
+	capa->pkt.max_len = ODP_CONFIG_PACKET_MAX_SEGS * max_len;
+	capa->pkt.max_num = CONFIG_POOL_MAX_NUM;
+	capa->pkt.min_headroom = ODP_CONFIG_PACKET_HEADROOM;
+	capa->pkt.min_tailroom = ODP_CONFIG_PACKET_TAILROOM;
+	capa->pkt.max_segs_per_pkt = ODP_CONFIG_PACKET_MAX_SEGS;
+	capa->pkt.min_seg_len = max_len;
+	capa->pkt.max_seg_len = max_len;
+	capa->pkt.max_uarea_size = 0;
 
-	idx = num - POOL_CHUNK_SIZE;
-	for (i = 0; i < POOL_CHUNK_SIZE; i++)
-		ret_buf(&pool->s, buf_cache->s.buf[idx + i]);
+	/* Timeout pools */
+	capa->tmo.max_pools = ODP_CONFIG_POOLS;
+	capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
 
-	num -= POOL_CHUNK_SIZE;
-	buf_cache->s.num_buf = num;
-	ret_local_bufs(buf_cache, num, buf_hdr, num_free);
+	return 0;
 }
 
-void buffer_free(uint32_t pool_id, const odp_buffer_t buf)
+void odp_pool_print(odp_pool_t pool_hdl)
 {
-	local_cache_t *buf_cache = local.cache[pool_id];
-	uint32_t num;
-	int i;
-	pool_entry_t *pool;
-	odp_buffer_hdr_t *buf_hdr;
+	pool_t *pool;
 
-	buf_hdr = odp_buf_to_hdr(buf);
-	ODP_ASSERT(buf_hdr->allocator != ODP_FREEBUF);
-	buf_hdr->allocator = ODP_FREEBUF;
-
-	num = buf_cache->s.num_buf;
-
-	if (odp_likely((num + 1) < POOL_MAX_LOCAL_BUFS)) {
-		ret_local_bufs(buf_cache, num, &buf_hdr, 1);
-		return;
-	}
+	pool = pool_entry_from_hdl(pool_hdl);
 
-	pool = get_pool_entry(pool_id);
-
-	num -= POOL_CHUNK_SIZE;
-	for (i = 0; i < POOL_CHUNK_SIZE; i++)
-		ret_buf(&pool->s, buf_cache->s.buf[num + i]);
-
-	buf_cache->s.num_buf = num;
-	ret_local_bufs(buf_cache, num, &buf_hdr, 1);
+	printf("Pool info\n");
+	printf("---------\n");
+	printf(" pool %" PRIu64 "\n",
+	       odp_pool_to_u64(pool->pool_hdl));
+	printf(" name %s\n", pool->name);
+	printf(" pool type %s\n",
+	       pool->params.type == ODP_POOL_BUFFER ? "buffer" :
+	       (pool->params.type == ODP_POOL_PACKET ? "packet" :
+	       (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
+		"unknown")));
+	printf(" pool shm %" PRIu64 "\n",
+	       odp_shm_to_u64(pool->shm));
+	printf(" user area shm %" PRIu64 "\n",
+	       odp_shm_to_u64(pool->uarea_shm));
+	printf(" num %u\n", pool->num);
+	printf(" align %u\n", pool->align);
+	printf(" headroom %u\n", pool->headroom);
+	printf(" data size %u\n", pool->data_size);
+	printf(" max data len %u\n", pool->max_len);
+	printf(" max seg len %u\n", pool->max_seg_len);
+	printf(" tailroom %u\n", pool->tailroom);
+	printf(" block size %u\n", pool->block_size);
+	printf(" uarea size %u\n", pool->uarea_size);
+	printf(" shm size %u\n", pool->shm_size);
+	printf(" base addr %p\n", pool->base_addr);
+	printf(" uarea shm size %u\n", pool->uarea_shm_size);
+	printf(" uarea base addr %p\n", pool->uarea_base_addr);
+	printf("\n");
 }
 
-void odp_buffer_free(odp_buffer_t buf)
+odp_pool_t odp_buffer_pool(odp_buffer_t buf)
 {
 	uint32_t pool_id = pool_id_from_buf(buf);
 
-	buffer_free(pool_id, buf);
+	return pool_index_to_handle(pool_id);
 }
 
-void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
+void odp_pool_param_init(odp_pool_param_t *params)
 {
-	uint32_t pool_id = pool_id_from_buf(buf[0]);
+	memset(params, 0, sizeof(odp_pool_param_t));
+}
 
-	buffer_free_multi(pool_id, buf, num);
+uint64_t odp_pool_to_u64(odp_pool_t hdl)
+{
+	return _odp_pri(hdl);
 }
 
-void odp_pool_print(odp_pool_t pool_hdl)
+int seg_alloc_head(odp_buffer_hdr_t *buf_hdr, int segcount)
 {
-	pool_entry_t *pool;
-	uint32_t pool_id;
+	(void)buf_hdr;
+	(void)segcount;
+	return 0;
+}
 
-	pool_id = pool_handle_to_index(pool_hdl);
-	pool = get_pool_entry(pool_id);
-
-	uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount);
-	uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount);
-	uint64_t bufallocs = odp_atomic_load_u64(&pool->s.poolstats.bufallocs);
-	uint64_t buffrees = odp_atomic_load_u64(&pool->s.poolstats.buffrees);
-	uint64_t blkallocs = odp_atomic_load_u64(&pool->s.poolstats.blkallocs);
-	uint64_t blkfrees = odp_atomic_load_u64(&pool->s.poolstats.blkfrees);
-	uint64_t bufempty = odp_atomic_load_u64(&pool->s.poolstats.bufempty);
-	uint64_t blkempty = odp_atomic_load_u64(&pool->s.poolstats.blkempty);
-	uint64_t bufhiwmct =
-		odp_atomic_load_u64(&pool->s.poolstats.buf_high_wm_count);
-	uint64_t buflowmct =
-		odp_atomic_load_u64(&pool->s.poolstats.buf_low_wm_count);
-	uint64_t blkhiwmct =
-		odp_atomic_load_u64(&pool->s.poolstats.blk_high_wm_count);
-	uint64_t blklowmct =
-		odp_atomic_load_u64(&pool->s.poolstats.blk_low_wm_count);
-
-	ODP_DBG("Pool info\n");
-	ODP_DBG("---------\n");
-	ODP_DBG(" pool %" PRIu64 "\n",
-		odp_pool_to_u64(pool->s.pool_hdl));
-	ODP_DBG(" name %s\n",
-		pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
-	ODP_DBG(" pool type %s\n",
-		pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
-		(pool->s.params.type == ODP_POOL_PACKET ? "packet" :
-		(pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
-		 "unknown")));
-	ODP_DBG(" pool storage ODP managed shm handle %" PRIu64 "\n",
-		odp_shm_to_u64(pool->s.pool_shm));
-	ODP_DBG(" pool status %s\n",
-		pool->s.quiesced ? "quiesced" : "active");
-	ODP_DBG(" pool opts %s, %s\n",
-		pool->s.flags.unsegmented ? "unsegmented" : "segmented",
-		pool->s.flags.predefined ? "predefined" : "created");
-	ODP_DBG(" pool base %p\n", pool->s.pool_base_addr);
-	ODP_DBG(" pool size %zu (%zu pages)\n",
-		pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE);
-	ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr);
-	ODP_DBG(" udata size %zu\n", pool->s.udata_size);
-	ODP_DBG(" headroom %u\n", pool->s.headroom);
-	ODP_DBG(" tailroom %u\n", pool->s.tailroom);
-	if (pool->s.params.type == ODP_POOL_BUFFER) {
-		ODP_DBG(" buf size %zu\n", pool->s.params.buf.size);
-		ODP_DBG(" buf align %u requested, %u used\n",
-			pool->s.params.buf.align, pool->s.buf_align);
-	} else if (pool->s.params.type == ODP_POOL_PACKET) {
-		ODP_DBG(" seg length %u requested, %u used\n",
-			pool->s.params.pkt.seg_len, pool->s.seg_size);
-		ODP_DBG(" pkt length %u requested, %u used\n",
-			pool->s.params.pkt.len, pool->s.blk_size);
-	}
-	ODP_DBG(" num bufs %u\n", pool->s.buf_num);
-	ODP_DBG(" bufs available %u %s\n", bufcount,
-		pool->s.buf_low_wm_assert ? " **buf low wm asserted**" : "");
-	ODP_DBG(" bufs in use %u\n", pool->s.buf_num - bufcount);
-	ODP_DBG(" buf allocs %lu\n", bufallocs);
-	ODP_DBG(" buf frees %lu\n", buffrees);
-	ODP_DBG(" buf empty %lu\n", bufempty);
-	ODP_DBG(" blk size %zu\n",
-		pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
-	ODP_DBG(" blks available %u %s\n", blkcount,
-		pool->s.blk_low_wm_assert ? " **blk low wm asserted**" : "");
-	ODP_DBG(" blk allocs %lu\n", blkallocs);
-	ODP_DBG(" blk frees %lu\n", blkfrees);
-	ODP_DBG(" blk empty %lu\n", blkempty);
-	ODP_DBG(" buf high wm value %lu\n", pool->s.buf_high_wm);
-	ODP_DBG(" buf high wm count %lu\n", bufhiwmct);
-	ODP_DBG(" buf low wm value %lu\n", pool->s.buf_low_wm);
-	ODP_DBG(" buf low wm count %lu\n", buflowmct);
-	ODP_DBG(" blk high wm value %lu\n", pool->s.blk_high_wm);
-	ODP_DBG(" blk high wm count %lu\n", blkhiwmct);
-	ODP_DBG(" blk low wm value %lu\n", pool->s.blk_low_wm);
-	ODP_DBG(" blk low wm count %lu\n", blklowmct);
+void seg_free_head(odp_buffer_hdr_t *buf_hdr, int segcount)
+{
+	(void)buf_hdr;
+	(void)segcount;
 }
 
-odp_pool_t odp_buffer_pool(odp_buffer_t buf)
+int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
 {
-	uint32_t pool_id = pool_id_from_buf(buf);
+	(void)buf_hdr;
+	(void)segcount;
+	return 0;
+}
 
-	return pool_index_to_handle(pool_id);
+void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+{
+	(void)buf_hdr;
+	(void)segcount;
 }
 
-void odp_pool_param_init(odp_pool_param_t *params)
+int odp_buffer_is_valid(odp_buffer_t buf)
 {
-	memset(params, 0, sizeof(odp_pool_param_t));
+	odp_buffer_bits_t handle;
+	pool_t *pool;
+
+	handle.handle = buf;
+
+	if (handle.pool_id >= ODP_CONFIG_POOLS)
+		return 0;
+
+	pool = pool_entry(handle.pool_id);
+
+	if (pool->reserved == 0)
+		return 0;
+
+	return 1;
 }
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index b26ac6b..573489d 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -29,6 +29,7 @@
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <inttypes.h>
+#include <string.h>
 
 #include <odp/api/align.h>
 #include <odp_align_internal.h>
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index e01b0a5..ab25aab 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -46,6 +46,8 @@
 #include <protocols/eth.h>
 #include <protocols/ip.h>
 
+#define MAX_SEGS ODP_CONFIG_PACKET_MAX_SEGS
+
 static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
 
 static int sock_stats_reset(pktio_entry_t *pktio_entry);
@@ -583,20 +585,18 @@ static int sock_mmsg_open(odp_pktio_t id ODP_UNUSED,
 }
 
 static uint32_t _rx_pkt_to_iovec(odp_packet_t pkt,
-				 struct iovec iovecs[ODP_BUFFER_MAX_SEG])
+				 struct iovec iovecs[MAX_SEGS])
 {
 	odp_packet_seg_t seg = odp_packet_first_seg(pkt);
 	uint32_t seg_count = odp_packet_num_segs(pkt);
 	uint32_t seg_id = 0;
 	uint32_t iov_count = 0;
-	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
 	uint8_t *ptr;
 	uint32_t seglen;
 
 	for (seg_id = 0; seg_id < seg_count; ++seg_id) {
-		ptr = segment_map(&pkt_hdr->buf_hdr, (odp_buffer_seg_t)seg,
-				  &seglen, pkt_hdr->frame_len,
-				  pkt_hdr->headroom);
+		ptr = odp_packet_seg_data(pkt, seg);
+		seglen = odp_packet_seg_data_len(pkt, seg);
 
 		if (ptr) {
 			iovecs[iov_count].iov_base = ptr;
@@ -692,7 +692,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
 		}
 	} else {
 		struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX]
-				   [ODP_BUFFER_MAX_SEG];
+				   [MAX_SEGS];
 
 		for (i = 0; i < (int)len; i++) {
 			int num;
@@ -754,7 +754,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
 }
 
 static uint32_t _tx_pkt_to_iovec(odp_packet_t pkt,
-				 struct iovec iovecs[ODP_BUFFER_MAX_SEG])
+				 struct iovec iovecs[MAX_SEGS])
 {
 	uint32_t pkt_len = odp_packet_len(pkt);
 	uint32_t offset = odp_packet_l2_offset(pkt);
@@ -780,7 +780,7 @@ static int sock_mmsg_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
 {
 	pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
 	struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_TX];
-	struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][ODP_BUFFER_MAX_SEG];
+	struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][MAX_SEGS];
 	int ret;
 	int sockfd;
 	int n, i;
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 9655668..bf4402a 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -346,17 +346,15 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 {
 	int pz = getpagesize();
-	uint32_t pool_id;
-	pool_entry_t *pool_entry;
+	pool_t *pool;
 
 	if (pool_hdl == ODP_POOL_INVALID)
 		ODP_ABORT("Invalid pool handle\n");
 
-	pool_id = pool_handle_to_index(pool_hdl);
-	pool_entry = get_pool_entry(pool_id);
+	pool = odp_pool_to_entry(pool_hdl);
 
 	/* Frame has to capture full packet which can fit to the pool block.*/
-	ring->req.tp_frame_size = (pool_entry->s.blk_size +
+	ring->req.tp_frame_size = (pool->data_size +
 				   TPACKET_HDRLEN + TPACKET_ALIGNMENT +
 				   (pz - 1)) & (-pz);
@@ -364,7 +362,7 @@ static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 	 * and align size to page boundary. */
 	ring->req.tp_block_size = (ring->req.tp_frame_size *
-				   pool_entry->s.buf_num + (pz - 1)) & (-pz);
+				   pool->num + (pz - 1)) & (-pz);
 
 	if (!fanout) {
 		/* Single socket is in use. Use 1 block with buf_num frames. */
diff --git a/test/common_plat/performance/odp_pktio_perf.c b/test/common_plat/performance/odp_pktio_perf.c
index f041b13..ad0352e 100644
--- a/test/common_plat/performance/odp_pktio_perf.c
+++ b/test/common_plat/performance/odp_pktio_perf.c
@@ -34,7 +34,7 @@
 #include <inttypes.h>
 #include <test_debug.h>
 
-#define PKT_BUF_NUM 8192
+#define PKT_BUF_NUM (32 * 1024)
 #define MAX_NUM_IFACES 2
 #define TEST_HDR_MAGIC 0x92749451
 #define MAX_WORKERS 32
diff --git a/test/common_plat/performance/odp_scheduling.c b/test/common_plat/performance/odp_scheduling.c
index 9407636..e2a49d3 100644
--- a/test/common_plat/performance/odp_scheduling.c
+++ b/test/common_plat/performance/odp_scheduling.c
@@ -28,7 +28,7 @@
 /* GNU lib C */
 #include <getopt.h>
 
-#define MSG_POOL_SIZE (4 * 1024 * 1024) /**< Message pool size */
+#define NUM_MSG (512 * 1024) /**< Number of msg in pool */
 #define MAX_ALLOCS 32 /**< Alloc burst size */
 #define QUEUES_PER_PRIO 64 /**< Queue per priority */
 #define NUM_PRIOS 2 /**< Number of tested priorities */
@@ -868,7 +868,7 @@ int main(int argc, char *argv[])
 	odp_pool_param_init(&params);
 	params.buf.size = sizeof(test_message_t);
 	params.buf.align = 0;
-	params.buf.num = MSG_POOL_SIZE / sizeof(test_message_t);
+	params.buf.num = NUM_MSG;
 	params.type = ODP_POOL_BUFFER;
 
 	pool = odp_pool_create("msg_pool", &params);
@@ -880,8 +880,6 @@ int main(int argc, char *argv[])
 
 	globals->pool = pool;
 
-	/* odp_pool_print(pool); */
-
 	/*
 	 * Create a queue for plain queue test
 	 */
@@ -940,6 +938,8 @@ int main(int argc, char *argv[])
 
 	odp_shm_print_all();
 
+	odp_pool_print(pool);
+
 	/* Barrier to sync test case execution */
 	odp_barrier_init(&globals->barrier, num_workers);
 
diff --git a/test/common_plat/validation/api/packet/packet.c b/test/common_plat/validation/api/packet/packet.c
index a4426e2..454c73f 100644
--- a/test/common_plat/validation/api/packet/packet.c
+++ b/test/common_plat/validation/api/packet/packet.c
@@ -44,7 +44,12 @@ int packet_suite_init(void)
 	if (odp_pool_capability(&capa) < 0)
 		return -1;
 
-	packet_len = capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE;
+	/* Pick a typical packet size and decrement it to the single segment
+	 * limit if needed (min_seg_len maybe equal to max_len
+	 * on some systems). */
+	packet_len = 512;
+	while (packet_len > (capa.pkt.min_seg_len - PACKET_TAILROOM_RESERVE))
+		packet_len--;
 
 	if (capa.pkt.max_len) {
 		segmented_packet_len = capa.pkt.max_len;
@@ -115,6 +120,7 @@ int packet_suite_init(void)
 	udat_size = odp_packet_user_area_size(test_packet);
 	if (!udat || udat_size != sizeof(struct udata_struct))
 		return -1;
+	odp_pool_print(packet_pool);
 
 	memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
-- 
2.8.1