From: David Stevens <steve...@chromium.org>

When calling arch_sync_dma_for_device, we need to pass it the memory
that is actually being used for DMA. When using swiotlb bounce buffers,
this is the bounce buffer. Move the arch_sync_dma_for_device call into
the __iommu_dma_map_swiotlb helper, so it can use the bounce buffer
address if necessary. This also means it is no longer necessary to call
iommu_dma_sync_sg_for_device in iommu_dma_map_sg for untrusted devices.

Fixes: 82612d66d51d ("iommu: Allow the dma-iommu api to use bounce buffers")
Signed-off-by: David Stevens <steve...@chromium.org>
---
 drivers/iommu/dma-iommu.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index eac65302439e..e79e274d2dc5 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -574,6 +574,9 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
                memset(padding_start, 0, padding_size);
        }
 
+       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               arch_sync_dma_for_device(phys, org_size, dir);
+
        iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
        if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
                swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
@@ -847,14 +850,9 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
        phys_addr_t phys = page_to_phys(page) + offset;
        bool coherent = dev_is_dma_coherent(dev);
-       dma_addr_t dma_handle;
 
-       dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+       return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
                        coherent, dir, attrs);
-       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           dma_handle != DMA_MAPPING_ERROR)
-               arch_sync_dma_for_device(phys, size, dir);
-       return dma_handle;
 }
 
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
@@ -997,12 +995,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
            iommu_deferred_attach(dev, domain))
                return 0;
 
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
-
        if (dev_is_untrusted(dev))
                return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
-- 
2.32.0.93.g670b81a890-goog

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to