Replace usages of direct access to shared memory config with
calls to the new API.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 drivers/net/mlx4/mlx4_mr.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 48d458ad4..636d241e3 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -593,7 +593,6 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
                       uintptr_t addr)
 {
        struct mlx4_priv *priv = dev->data->dev_private;
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        const struct rte_memseg_list *msl;
        const struct rte_memseg *ms;
        struct mlx4_mr *mr = NULL;
@@ -696,7 +695,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
         * just single page. If not, go on with the big chunk atomically from
         * here.
         */
-       rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+       rte_eal_mcfg_mem_read_lock();
        data_re = data;
        if (len > msl->page_sz &&
            !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
@@ -714,7 +713,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
                 */
                data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
                data.end = data.start + msl->page_sz;
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_eal_mcfg_mem_read_unlock();
                mr_free(mr);
                goto alloc_resources;
        }
@@ -734,7 +733,7 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
                DEBUG("port %u found MR for %p on final lookup, abort",
                      dev->data->port_id, (void *)addr);
                rte_rwlock_write_unlock(&priv->mr.rwlock);
-               rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+               rte_eal_mcfg_mem_read_unlock();
                /*
                 * Must be unlocked before calling rte_free() because
                 * mlx4_mr_mem_event_free_cb() can be called inside.
@@ -802,12 +801,12 @@ mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
        /* Lookup can't fail. */
        assert(entry->lkey != UINT32_MAX);
        rte_rwlock_write_unlock(&priv->mr.rwlock);
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_eal_mcfg_mem_read_unlock();
        return entry->lkey;
 err_mrlock:
        rte_rwlock_write_unlock(&priv->mr.rwlock);
 err_memlock:
-       rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+       rte_eal_mcfg_mem_read_unlock();
 err_nolock:
        /*
         * In case of error, as this can be called in a datapath, a warning
-- 
2.17.1

Reply via email to