A lot of call sites use these macros just to protect against concurrent access, and I cannot see the gain of having them.
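
To make the intent concrete, here is a rough sketch of what a call site looks like once the macros are open-coded. It is illustrative only and not part of the diff below: example_insert() and the include list are made up for the sketch, while the lookup, error handling and locking are lifted from the rte_mempool.c hunk.

/*
 * Sketch only: open-coded lookup + locked insert, the same pattern used
 * in rte_mempool_xmem_create() below.  example_insert() is a made-up
 * helper, not a DPDK function.
 */
#include <sys/queue.h>

#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_tailq.h>

/* list head type, mirroring the declaration in rte_mempool.c */
TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);

struct rte_tailq_entry *
example_insert(struct rte_tailq_entry *te)
{
	struct rte_mempool_list *mempool_list;

	/* replaces RTE_EAL_TAILQ_EXIST_CHECK(RTE_TAILQ_MEMPOOL) */
	mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
			rte_mempool_list);
	if (mempool_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* replaces RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, ...) */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(mempool_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return te;
}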
Signed-off-by: David Marchand <david.marchand at 6wind.com>
---
 lib/librte_eal/common/include/rte_eal.h |   58 -------------------------------
 lib/librte_mempool/rte_mempool.c        |   10 ++++--
 2 files changed, 7 insertions(+), 61 deletions(-)

diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index b72606b..1385a73 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -195,64 +195,6 @@ rte_set_application_usage_hook( rte_usage_hook_t usage_func );
  */
 #define RTE_EAL_MEMPOOL_RWLOCK   (&rte_eal_get_configuration()->mem_config->mplock)
 
-
-/**
- * Utility macro to do a thread-safe tailq 'INSERT' of rte_mem_config
- *
- * @param idx
- *   a kind of tailq define in enum rte_tailq_t
- *
- * @param type
- *   type of list(tailq head)
- *
- * @param elm
- *   The element will be added into the list
- *
- */
-#define RTE_EAL_TAILQ_INSERT_TAIL(idx, type, elm) do {	\
-	struct type *list;				\
-	list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type);	\
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);	\
-	TAILQ_INSERT_TAIL(list, elm, next);		\
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);	\
-} while (0)
-
-/**
- * Utility macro to do a thread-safe tailq 'REMOVE' of rte_mem_config
- *
- * @param idx
- *   a kind of tailq define in enum rte_tailq_t
- *
- * @param type
- *   type of list(tailq head)
- *
- * @param elm
- *   The element will be remove from the list
- *
- */
-#define RTE_EAL_TAILQ_REMOVE(idx, type, elm) do {	\
-	struct type *list;				\
-	list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type);	\
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);	\
-	TAILQ_REMOVE(list, elm, next);			\
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);	\
-} while (0)						\
-
-
-/**
- * macro to check TAILQ exist
- *
- * @param idx
- *   a kind of tailq define in enum rte_tailq_t
- *
- */
-#define RTE_EAL_TAILQ_EXIST_CHECK(idx) do {	\
-	if (RTE_TAILQ_LOOKUP_BY_IDX(idx, rte_tailq_head) == NULL){ \
-		rte_errno = E_RTE_NO_TAILQ;	\
-		return NULL;	\
-	} \
-} while(0)
-
 /**
  * Whether EAL is using huge pages (disabled by --no-huge option).
  * The no-huge mode cannot be used with UIO poll-mode drivers like igb/ixgbe.
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index bb40523..3301e97 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -403,6 +403,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	char rg_name[RTE_RING_NAMESIZE];
+	struct rte_mempool_list *mempool_list;
 	struct rte_mempool *mp = NULL;
 	struct rte_tailq_entry *te;
 	struct rte_ring *r;
@@ -432,8 +433,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 #endif
 
 	/* check that we have an initialised tail queue */
-	if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
-			rte_mempool_list) == NULL) {
+	mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+			rte_mempool_list);
+	if (mempool_list == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
 		return NULL;
 	}
@@ -599,7 +601,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 
 	te->data = (void *) mp;
 
-	RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, te);
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_INSERT_TAIL(mempool_list, te, next);
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 
 exit:
 	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
-- 
1.7.10.4