Re: [PATCH v6 4/7] dma-iommu: fold _swiotlb helpers into callers

2021-08-19 Thread Robin Murphy

On 2021-08-17 02:38, David Stevens wrote:

From: David Stevens 

Fold the _swiotlb helper functions into the respective _page functions,
since recent fixes have moved all logic from the _page functions to the
_swiotlb functions.
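
The change is purely mechanical: each single-caller _swiotlb helper is
deleted and its body moves into the corresponding _page function, with
no intended change in behaviour. As a rough illustration of the pattern
only -- the names below (do_map, map_helper, map_page_*) are made up
and are not the actual dma-iommu functions:

/* Hypothetical stand-in for the underlying mapping primitive. */
static int do_map(unsigned long size)
{
        return (int)size;
}

/*
 * Before the fold: map_page() is a thin wrapper, and the real logic
 * lives in a single-caller helper (analogous to __iommu_dma_map_swiotlb()).
 */
static int map_helper(unsigned long off, unsigned long size)
{
        if ((off | size) & 0xfff)               /* made-up bounce check */
                size = (size + 0xfff) & ~0xffful;
        return do_map(size);
}

static int map_page_before(unsigned long off, unsigned long size)
{
        return map_helper(off, size);
}

/*
 * After the fold: the helper is deleted and its body sits directly in
 * the caller, so the bounce decision and the mapping read as one function.
 */
static int map_page_after(unsigned long off, unsigned long size)
{
        if ((off | size) & 0xfff)               /* made-up bounce check */
                size = (size + 0xfff) & ~0xffful;
        return do_map(size);
}

The patch below applies that same pattern to __iommu_dma_map_swiotlb()
and __iommu_dma_unmap_swiotlb().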


Reviewed-by: Robin Murphy 


Signed-off-by: David Stevens 
Reviewed-by: Christoph Hellwig 
---
  drivers/iommu/dma-iommu.c | 135 +-
  1 file changed, 59 insertions(+), 76 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5dd2c517dbf5..8152efada8b2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -493,26 +493,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
  }
  
-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
-   size_t size, enum dma_data_direction dir,
-   unsigned long attrs)
-{
-   struct iommu_domain *domain = iommu_get_dma_domain(dev);
-   phys_addr_t phys;
-
-   phys = iommu_iova_to_phys(domain, dma_addr);
-   if (WARN_ON(!phys))
-   return;
-
-   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
-   arch_sync_dma_for_cpu(phys, size, dir);
-
-   __iommu_dma_unmap(dev, dma_addr, size);
-
-   if (unlikely(is_swiotlb_buffer(phys)))
-   swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
  {
@@ -539,55 +519,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return iova + iova_off;
  }
  
-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
-   size_t org_size, dma_addr_t dma_mask, bool coherent,
-   enum dma_data_direction dir, unsigned long attrs)
-{
-   int prot = dma_info_to_prot(dir, coherent, attrs);
-   struct iommu_domain *domain = iommu_get_dma_domain(dev);
-   struct iommu_dma_cookie *cookie = domain->iova_cookie;
-   struct iova_domain *iovad = &cookie->iovad;
-   size_t aligned_size = org_size;
-   void *padding_start;
-   size_t padding_size;
-   dma_addr_t iova;
-
-   /*
-* If both the physical buffer start address and size are
-* page aligned, we don't need to use a bounce page.
-*/
-   if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
-   iova_offset(iovad, phys | org_size)) {
-   aligned_size = iova_align(iovad, org_size);
-   phys = swiotlb_tbl_map_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
-   if (phys == DMA_MAPPING_ERROR)
-   return DMA_MAPPING_ERROR;
-
-   /* Cleanup the padding area. */
-   padding_start = phys_to_virt(phys);
-   padding_size = aligned_size;
-
-   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-   (dir == DMA_TO_DEVICE ||
-dir == DMA_BIDIRECTIONAL)) {
-   padding_start += org_size;
-   padding_size -= org_size;
-   }
-
-   memset(padding_start, 0, padding_size);
-   }
-
-   if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-   arch_sync_dma_for_device(phys, org_size, dir);
-
-   iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
-   swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
-   return iova;
-}
-
  static void __iommu_dma_free_pages(struct page **pages, int count)
  {
while (count--)
@@ -848,15 +779,68 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
  {
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
+   int prot = dma_info_to_prot(dir, coherent, attrs);
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+   struct iova_domain *iovad = &cookie->iovad;
+   size_t aligned_size = size;
+   dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+   /*
+* If both the physical buffer start address and size are
+* page aligned, we don't need to use a bounce page.
+*/
+   if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+   iova_offset(iovad, phys | size)) {
+   void *padding_start;
+   size_t padding_size;
+
+   aligned_size = iova_align(iovad, size);
+   phys = swiotlb_tbl_map_single(dev, phys, size,
+ aligned_size, dir, attrs);
+
+   if (phys == DMA_MAPPING_ERROR)
+   return DMA_MAPPING_ERROR;

[PATCH v6 4/7] dma-iommu: fold _swiotlb helpers into callers

2021-08-16 Thread David Stevens
From: David Stevens 

Fold the _swiotlb helper functions into the respective _page functions,
since recent fixes have moved all logic from the _page functions to the
_swiotlb functions.

Signed-off-by: David Stevens 
Reviewed-by: Christoph Hellwig 
---
 drivers/iommu/dma-iommu.c | 135 +-
 1 file changed, 59 insertions(+), 76 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5dd2c517dbf5..8152efada8b2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -493,26 +493,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
-   size_t size, enum dma_data_direction dir,
-   unsigned long attrs)
-{
-   struct iommu_domain *domain = iommu_get_dma_domain(dev);
-   phys_addr_t phys;
-
-   phys = iommu_iova_to_phys(domain, dma_addr);
-   if (WARN_ON(!phys))
-   return;
-
-   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
-   arch_sync_dma_for_cpu(phys, size, dir);
-
-   __iommu_dma_unmap(dev, dma_addr, size);
-
-   if (unlikely(is_swiotlb_buffer(phys)))
-   swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
 {
@@ -539,55 +519,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return iova + iova_off;
 }
 
-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
-   size_t org_size, dma_addr_t dma_mask, bool coherent,
-   enum dma_data_direction dir, unsigned long attrs)
-{
-   int prot = dma_info_to_prot(dir, coherent, attrs);
-   struct iommu_domain *domain = iommu_get_dma_domain(dev);
-   struct iommu_dma_cookie *cookie = domain->iova_cookie;
-   struct iova_domain *iovad = &cookie->iovad;
-   size_t aligned_size = org_size;
-   void *padding_start;
-   size_t padding_size;
-   dma_addr_t iova;
-
-   /*
-* If both the physical buffer start address and size are
-* page aligned, we don't need to use a bounce page.
-*/
-   if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
-   iova_offset(iovad, phys | org_size)) {
-   aligned_size = iova_align(iovad, org_size);
-   phys = swiotlb_tbl_map_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
-   if (phys == DMA_MAPPING_ERROR)
-   return DMA_MAPPING_ERROR;
-
-   /* Cleanup the padding area. */
-   padding_start = phys_to_virt(phys);
-   padding_size = aligned_size;
-
-   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-   (dir == DMA_TO_DEVICE ||
-dir == DMA_BIDIRECTIONAL)) {
-   padding_start += org_size;
-   padding_size -= org_size;
-   }
-
-   memset(padding_start, 0, padding_size);
-   }
-
-   if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-   arch_sync_dma_for_device(phys, org_size, dir);
-
-   iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-   if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
-   swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
-   return iova;
-}
-
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
while (count--)
@@ -848,15 +779,68 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
+   int prot = dma_info_to_prot(dir, coherent, attrs);
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+   struct iova_domain *iovad = &cookie->iovad;
+   size_t aligned_size = size;
+   dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+   /*
+* If both the physical buffer start address and size are
+* page aligned, we don't need to use a bounce page.
+*/
+   if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+   iova_offset(iovad, phys | size)) {
+   void *padding_start;
+   size_t padding_size;
+
+   aligned_size = iova_align(iovad, size);
+   phys = swiotlb_tbl_map_single(dev, phys, size,
+ aligned_size, dir, attrs);
+
+   if (phys == DMA_MAPPING_ERROR)
+   return DMA_MAPPING_ERROR;
 
-   return __iommu_dma_map_swiotlb(dev, phys, size, dma