Rename vmap_page_range to map_kernel_range so that the name matches the map_kernel_range_noflush API. Also change it to take a size instead of an end address, similar to the noflush version.
Signed-off-by: Christoph Hellwig <h...@lst.de>
Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 mm/vmalloc.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 55df5dc6a9fc..a3d810def567 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -272,13 +272,13 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 	return nr;
 }
 
-static int vmap_page_range(unsigned long start, unsigned long end,
+static int map_kernel_range(unsigned long start, unsigned long size,
 			   pgprot_t prot, struct page **pages)
 {
 	int ret;
 
-	ret = map_kernel_range_noflush(start, end - start, prot, pages);
-	flush_cache_vmap(start, end);
+	ret = map_kernel_range_noflush(start, size, prot, pages);
+	flush_cache_vmap(start, start + size);
 	return ret;
 }
 
@@ -1866,7 +1866,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 
 	kasan_unpoison_vmalloc(mem, size);
 
-	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+	if (map_kernel_range(addr, size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
 	}
@@ -2030,10 +2030,9 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
-	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
-	err = vmap_page_range(addr, end, prot, pages);
+	err = map_kernel_range(addr, get_vm_area_size(area), prot, pages);
 	return err > 0 ? 0 : err;
 }
-- 
2.25.1
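
For illustration only, not part of the patch: a minimal sketch of a caller using the renamed interface. The helper name example_map_area_pages is made up, and since map_kernel_range() stays static it would have to live in mm/vmalloc.c; the point is just that callers now pass a start address and a size, matching map_kernel_range_noflush(), instead of computing an end address. It mirrors what map_vm_area() does after this patch.

/*
 * Hypothetical helper, sketched as if it lived in mm/vmalloc.c: map the
 * pages of an existing vm_struct using the renamed interface, passing
 * the area size directly rather than an end address.
 */
static int example_map_area_pages(struct vm_struct *area, pgprot_t prot,
				  struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);
	int ret;

	/*
	 * map_kernel_range() calls map_kernel_range_noflush() and then
	 * flushes the cache for [addr, addr + size).
	 */
	ret = map_kernel_range(addr, size, prot, pages);

	/* A positive return value is the number of pages mapped. */
	return ret > 0 ? 0 : ret;
}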