From: Robin Murphy <robin.mur...@arm.com>

The remaining internal callsites don't care about having prototypes
compatible with the relevant dma_map_ops callbacks, so the extra
level of indirection just wastes space and complicates things.

Signed-off-by: Robin Murphy <robin.mur...@arm.com>
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 drivers/iommu/dma-iommu.c | 25 +++++++------------------
 1 file changed, 7 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4ebd08e3a83a..b52c5d6be7b4 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -698,18 +698,6 @@ static void iommu_dma_sync_sg_for_device(struct device 
*dev,
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int prot)
-{
-       return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
-}
-
-static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       __iommu_dma_unmap(dev, handle, size);
-}
-
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
@@ -955,7 +943,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t 
size,
                if (!addr)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+                                         ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        if (coherent)
                                __free_pages(page, get_order(size));
@@ -972,7 +961,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t 
size,
                if (!page)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize, 
ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
@@ -986,7 +975,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t 
size,
                                arch_dma_prep_coherent(page, iosize);
                        memset(addr, 0, size);
                } else {
-                       __iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+                       __iommu_dma_unmap(dev, *handle, iosize);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
@@ -1025,12 +1014,12 @@ static void iommu_dma_free(struct device *dev, size_t 
size, void *cpu_addr,
         * Hence how dodgy the below logic looks...
         */
        if (dma_in_atomic_pool(cpu_addr, size)) {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);
 
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)){
@@ -1041,7 +1030,7 @@ static void iommu_dma_free(struct device *dev, size_t 
size, void *cpu_addr,
                __iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
-- 
2.20.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to