Re: [PATCH 6/8] iommu: allow the dma-iommu api to use bounce buffers

2019-12-24 Thread kbuild test robot
Hi Tom,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on rockchip/for-next]
[cannot apply to iommu/next tegra/for-next vfio/next linus/master v5.5-rc3 next-20191219]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree in git format-patch; please see https://stackoverflow.com/a/37406982]
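
For example, assuming the branch tracks its intended upstream, something like:

    git format-patch --base=auto -8

records the base tree information in the generated patches; an explicit
commit can also be named with --base=<commit>.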

url:    https://github.com/0day-ci/linux/commits/Tom-Murphy/Convert-the-intel-iommu-driver-to-the-dma-iommu-api/20191224-171249
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git for-next
config: x86_64-defconfig (attached as .config)
compiler: gcc-7 (Debian 7.5.0-3) 7.5.0
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64 
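
(With the attached config saved as .config, a single-object build should be
enough to reproduce just this warning, e.g.:

    make ARCH=x86_64 olddefconfig
    make ARCH=x86_64 drivers/iommu/dma-iommu.o
)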

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot 

All warnings (new ones prefixed by >>):

   drivers//iommu/dma-iommu.c: In function '__iommu_dma_map':
>> drivers//iommu/dma-iommu.c:568:3: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
  void *padding_start = phys_to_virt(phys);
  ^~~~

vim +568 drivers//iommu/dma-iommu.c

   537  
   538  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
   539  size_t org_size, dma_addr_t dma_mask, bool coherent,
   540  enum dma_data_direction dir, unsigned long attrs)
   541  {
   542  int prot = dma_info_to_prot(dir, coherent, attrs);
   543  struct iommu_domain *domain = iommu_get_dma_domain(dev);
   544  struct iommu_dma_cookie *cookie = domain->iova_cookie;
   545  struct iova_domain *iovad = &cookie->iovad;
   546  size_t iova_off = iova_offset(iovad, phys);
   547  size_t aligned_size = iova_align(iovad, org_size + iova_off);
   548  dma_addr_t iova;
   549  
   550  if (unlikely(iommu_dma_deferred_attach(dev, domain)))
   551  return DMA_MAPPING_ERROR;
   552  
   553  #ifdef CONFIG_SWIOTLB
   554  /*
   555   * If both the physical buffer start address and size are
   556   * page aligned, we don't need to use a bounce page.
   557   */
   558  if (iommu_needs_bounce_buffer(dev)
   559  && iova_offset(iovad, phys | org_size)) {
   560  phys = swiotlb_tbl_map_single(dev,
   561  __phys_to_dma(dev, io_tlb_start),
   562  phys, org_size, aligned_size, dir, attrs);
   563  
   564  if (phys == DMA_MAPPING_ERROR)
   565  return DMA_MAPPING_ERROR;
   566  
   567  /* Cleanup the padding area. */
 > 568  void *padding_start = phys_to_virt(phys);
   569  size_t padding_size = aligned_size;
   570  
   571  if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
   572  (dir == DMA_TO_DEVICE ||
   573   dir == DMA_BIDIRECTIONAL)) {
   574  padding_start += org_size;
   575  padding_size -= org_size;
   576  }
   577  
   578  memset(padding_start, 0, padding_size);
   579  }
   580  #endif
   581  
   582  iova = iommu_dma_alloc_iova(domain, aligned_size, dma_mask, dev);
   583  if (!iova)
   584  return DMA_MAPPING_ERROR;
   585  
   586  if (iommu_map_atomic(domain, iova, phys - iova_off, aligned_size,
   587  prot)) {
   588  
   589  if (unlikely(is_swiotlb_buffer(phys)))
   590  swiotlb_tbl_unmap_single(dev, phys, aligned_size,
   591  aligned_size, dir, attrs);
   592  iommu_dma_free_iova(cookie, iova, aligned_size, NULL);
   593  return DMA_MAPPING_ERROR;
   594  }
   595  return iova + iova_off;
   596  }
   597  
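
The warning itself is straightforward: -Wdeclaration-after-statement enforces
the ISO C90 rule that all declarations in a block precede the first statement,
and padding_start/padding_size at lines 568-569 are declared after the
swiotlb_tbl_map_single() call. One way to resolve it (a sketch only, not the
code as posted) is to hoist the declarations to the top of the block:

	if (iommu_needs_bounce_buffer(dev) &&
	    iova_offset(iovad, phys | org_size)) {
		void *padding_start;
		size_t padding_size;

		phys = swiotlb_tbl_map_single(dev,
				__phys_to_dma(dev, io_tlb_start),
				phys, org_size, aligned_size, dir, attrs);
		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += org_size;
			padding_size -= org_size;
		}
		memset(padding_start, 0, padding_size);
	}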

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org Intel Corporation




[PATCH 6/8] iommu: allow the dma-iommu api to use bounce buffers

2019-12-23 Thread Tom Murphy
Allow the dma-iommu api to use bounce buffers for untrusted devices.
This is a copy of the intel bounce buffer code.

Signed-off-by: Tom Murphy 
---
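
Two notes on the new logic that may help review (not part of the commit
message):

The bounce condition ORs the start address and the size so that a single
iova_offset() call checks the alignment of both. For example, with a 4K IOVA
granule, phys = 0x12000 and org_size = 0x200 give (phys | org_size) = 0x12200,
whose in-granule offset (0x200) is nonzero, so the buffer is bounced; if both
are granule aligned, the OR is aligned too and the buffer is mapped in place.

On unmap, the physical address must be recovered while the IOMMU mapping
still exists, which fixes the ordering in __iommu_dma_unmap_swiotlb() below
(condensed sketch, error handling elided):

	phys = iommu_iova_to_phys(domain, dma_addr);	/* translate while mapped */
	__iommu_dma_unmap(dev, dma_addr, size);		/* tear down the IOVA */
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, size,
				aligned_size, dir, attrs);	/* bounces data back */
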
 drivers/iommu/dma-iommu.c | 93 ++++++++++++++++++++++++++++++++++++++--------
 drivers/iommu/iommu.c     | 10 ++++++++++
 include/linux/iommu.h     |  9 ++++++++-
 3 files changed, 95 insertions(+), 17 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4eac3cd35443..cf778db7d84d 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,9 +20,11 @@
 #include <linux/iommu.h>
 #include <linux/iova.h>
 #include <linux/irq.h>
+#include <linux/dma-direct.h>
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 
 struct iommu_dma_msi_page {
	struct list_head	list;
@@ -505,29 +507,89 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
	iommu_tlb_sync(domain, &iotlb_gather);
}
 
+
iommu_dma_free_iova(cookie, dma_addr, size, freelist);
 }
 
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+   size_t size, enum dma_data_direction dir,
+   unsigned long attrs)
+{
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
+   struct iommu_dma_cookie *cookie = domain->iova_cookie;
+   struct iova_domain *iovad = &cookie->iovad;
+   size_t iova_off = iova_offset(iovad, dma_addr);
+   size_t aligned_size = iova_align(iovad, size + iova_off);
+   phys_addr_t phys;
+
+   phys = iommu_iova_to_phys(domain, dma_addr);
+   if (WARN_ON(!phys))
+   return;
+
+   __iommu_dma_unmap(dev, dma_addr, size);
+
+#ifdef CONFIG_SWIOTLB
+   if (unlikely(is_swiotlb_buffer(phys)))
+   swiotlb_tbl_unmap_single(dev, phys, size,
+   aligned_size, dir, attrs);
+#endif
+}
+
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-   size_t size, int prot, dma_addr_t dma_mask)
+   size_t org_size, dma_addr_t dma_mask, bool coherent,
+   enum dma_data_direction dir, unsigned long attrs)
 {
+   int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
+   size_t aligned_size = iova_align(iovad, org_size + iova_off);
dma_addr_t iova;
 
if (unlikely(iommu_dma_deferred_attach(dev, domain)))
return DMA_MAPPING_ERROR;
 
-   size = iova_align(iovad, size + iova_off);
+#ifdef CONFIG_SWIOTLB
+   /*
+* If both the physical buffer start address and size are
+* page aligned, we don't need to use a bounce page.
+*/
+   if (iommu_needs_bounce_buffer(dev)
+   && iova_offset(iovad, phys | org_size)) {
+   phys = swiotlb_tbl_map_single(dev,
+   __phys_to_dma(dev, io_tlb_start),
+   phys, org_size, aligned_size, dir, attrs);
+
+   if (phys == DMA_MAPPING_ERROR)
+   return DMA_MAPPING_ERROR;
+
+   /* Cleanup the padding area. */
+   void *padding_start = phys_to_virt(phys);
+   size_t padding_size = aligned_size;
+
+   if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+   (dir == DMA_TO_DEVICE ||
+dir == DMA_BIDIRECTIONAL)) {
+   padding_start += org_size;
+   padding_size -= org_size;
+   }
 
-   iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+   memset(padding_start, 0, padding_size);
+   }
+#endif
+
+   iova = iommu_dma_alloc_iova(domain, aligned_size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
 
-   if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
-   iommu_dma_free_iova(cookie, iova, size, NULL);
+   if (iommu_map_atomic(domain, iova, phys - iova_off, aligned_size,
+   prot)) {
+
+   if (unlikely(is_swiotlb_buffer(phys)))
+   swiotlb_tbl_unmap_single(dev, phys, aligned_size,
+   aligned_size, dir, attrs);
+   iommu_dma_free_iova(cookie, iova, aligned_size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -761,10 +823,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
-   int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
 
-   dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+   dma_handle = __iommu_dma_map(dev, phys, size, dma_get_mask(dev),
+   coherent, dir, attrs);
if