At cleanup time, when the DMA unmap is done, the Linux kernel does not allow unmapping individual segments that were coalesced together when the DMA map was created for type1 IOMMU mappings. So this change maps the memory segments (hugepages) on a per-page basis instead.
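For illustration only (not part of the patch): a minimal standalone sketch of the type1 behaviour being worked around, assuming `container_fd` is an already opened and configured VFIO container and 2M hugepages. The helpers type1_map_one()/type1_unmap_one() are hypothetical wrappers around the real VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA ioctls; error handling and container setup are elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#define PAGE_SZ (2UL * 1024 * 1024)	/* assumption: 2M hugepages */

/* hypothetical helper: map one VA/IOVA range into the container */
static int
type1_map_one(int container_fd, uint64_t vaddr, uint64_t iova, uint64_t len)
{
	struct vfio_iommu_type1_dma_map dma_map = {
		.argsz = sizeof(dma_map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = vaddr,
		.iova = iova,
		.size = len,
	};

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
}

/* hypothetical helper: unmap one IOVA range from the container */
static int
type1_unmap_one(int container_fd, uint64_t iova, uint64_t len)
{
	struct vfio_iommu_type1_dma_unmap dma_unmap = {
		.argsz = sizeof(dma_unmap),
		.iova = iova,
		.size = len,
	};

	return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
}

static void
demo(int container_fd, uint64_t va, uint64_t iova)
{
	/* coalesced: two contiguous pages mapped with a single ioctl */
	type1_map_one(container_fd, va, iova, 2 * PAGE_SZ);
	/* the type1 driver refuses to split an existing mapping, so
	 * unmapping just one of the two pages fails... */
	type1_unmap_one(container_fd, iova, PAGE_SZ);		/* fails */
	/* ...only the whole original range can be removed */
	type1_unmap_one(container_fd, iova, 2 * PAGE_SZ);	/* ok */

	/* per-page (what this patch switches to): one map per page */
	type1_map_one(container_fd, va, iova, PAGE_SZ);
	type1_map_one(container_fd, va + PAGE_SZ, iova + PAGE_SZ, PAGE_SZ);
	/* now each page can be unmapped on its own at cleanup */
	type1_unmap_one(container_fd, iova, PAGE_SZ);		/* ok */
	type1_unmap_one(container_fd, iova + PAGE_SZ, PAGE_SZ);	/* ok */
}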
Signed-off-by: Nipun Gupta <[email protected]>
Signed-off-by: Nikhil Agarwal <[email protected]>
---
Changes in v2:
- Fix checkpatch errors by updating mailmap

 .mailmap                 |  4 ++--
 lib/eal/linux/eal_vfio.c | 29 -----------------------------
 2 files changed, 2 insertions(+), 31 deletions(-)

diff --git a/.mailmap b/.mailmap
index 75884b6fe2..a234c9b3de 100644
--- a/.mailmap
+++ b/.mailmap
@@ -954,7 +954,7 @@ Nicolas Chautru <[email protected]>
 Nicolas Dichtel <[email protected]>
 Nicolas Harnois <[email protected]>
 Nicolás Pernas Maradei <[email protected]> <[email protected]>
-Nikhil Agarwal <[email protected]>
+Nikhil Agarwal <[email protected]> <[email protected]> <[email protected]>
 Nikhil Jagtap <[email protected]>
 Nikhil Rao <[email protected]>
 Nikhil Vasoya <[email protected]>
@@ -962,7 +962,7 @@ Nikita Kozlov <[email protected]>
 Niklas Söderlund <[email protected]>
 Nikolay Nikolaev <[email protected]>
 Ning Li <[email protected]> <[email protected]>
-Nipun Gupta <[email protected]>
+Nipun Gupta <[email protected]> <[email protected]> <[email protected]>
 Nir Efrati <[email protected]>
 Nirmoy Das <[email protected]>
 Nithin Dabilpuram <[email protected]> <[email protected]>
diff --git a/lib/eal/linux/eal_vfio.c b/lib/eal/linux/eal_vfio.c
index 549b86ae1d..56edccb0db 100644
--- a/lib/eal/linux/eal_vfio.c
+++ b/lib/eal/linux/eal_vfio.c
@@ -1369,19 +1369,6 @@ rte_vfio_get_group_num(const char *sysfs_base,
 	return 1;
 }
 
-static int
-type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
-		size_t len, void *arg)
-{
-	int *vfio_container_fd = arg;
-
-	if (msl->external)
-		return 0;
-
-	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
-			len, 1);
-}
-
 static int
 type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 		void *arg)
@@ -1396,10 +1383,6 @@ type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 	if (ms->iova == RTE_BAD_IOVA)
 		return 0;
 
-	/* if IOVA mode is VA, we've already mapped the internal segments */
-	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
-		return 0;
-
 	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
 			ms->len, 1);
 }
@@ -1464,18 +1447,6 @@ vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
 static int
 vfio_type1_dma_map(int vfio_container_fd)
 {
-	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
-		/* with IOVA as VA mode, we can get away with mapping contiguous
-		 * chunks rather than going page-by-page.
-		 */
-		int ret = rte_memseg_contig_walk(type1_map_contig,
-				&vfio_container_fd);
-		if (ret)
-			return ret;
-		/* we have to continue the walk because we've skipped the
-		 * external segments during the config walk.
-		 */
-	}
 	return rte_memseg_walk(type1_map, &vfio_container_fd);
 }
-- 
2.25.1

