Hi Robin,

> -----Original Message-----
> From: iommu-boun...@lists.linux-foundation.org
> [mailto:iommu-boun...@lists.linux-foundation.org] On Behalf Of Christoph
> Hellwig
> Sent: 20 May 2019 08:30
> To: Robin Murphy <robin.mur...@arm.com>
> Cc: Tom Murphy <tmur...@arista.com>; Catalin Marinas
> <catalin.mari...@arm.com>; Will Deacon <will.dea...@arm.com>;
> linux-ker...@vger.kernel.org; iommu@lists.linux-foundation.org;
> linux-arm-ker...@lists.infradead.org
> Subject: [PATCH 07/24] iommu/dma: Move domain lookup into
> __iommu_dma_{map, unmap}
> 
> From: Robin Murphy <robin.mur...@arm.com>
> 
> Most of the callers don't care, and the couple that do already have the
> domain to hand for other reasons are in slow paths where the (trivial)
> overhead of a repeated lookup will be utterly immaterial.

On a HiSilicon ARM64 platform running 5.3-rc1, the SMMUv3 reports an
F_TRANSLATION fault when an ixgbe VF device is assigned to a guest.

[  196.747107] arm-smmu-v3 arm-smmu-v3.0.auto: event 0x10 received:
[  196.747109] arm-smmu-v3 arm-smmu-v3.0.auto: 0x00000180 00000010
[  196.747110] arm-smmu-v3 arm-smmu-v3.0.auto: 0x0000020100000000
[  196.747111] arm-smmu-v3 arm-smmu-v3.0.auto: 0x00000000ffffe040
[  196.747113] arm-smmu-v3 arm-smmu-v3.0.auto: 0x00000000ffffe000

Git bisect points to this patch.

Please see below.

> Signed-off-by: Robin Murphy <robin.mur...@arm.com>
> [hch: dropped the hunk touching iommu_dma_get_msi_page to avoid a
>  conflict with another series]
> Signed-off-by: Christoph Hellwig <h...@lst.de>
> ---
>  drivers/iommu/dma-iommu.c | 29 ++++++++++++++---------------
>  1 file changed, 14 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index c406abe3be01..6ece8f477fc8 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -448,9 +448,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
>                               size >> iova_shift(iovad));
>  }
> 
> -static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
> +static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
>               size_t size)
>  {
> +     struct iommu_domain *domain = iommu_get_dma_domain(dev);
>       struct iommu_dma_cookie *cookie = domain->iova_cookie;
>       struct iova_domain *iovad = &cookie->iovad;
>       size_t iova_off = iova_offset(iovad, dma_addr);
> @@ -465,8 +466,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
>  }
> 
>  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> -             size_t size, int prot, struct iommu_domain *domain)
> +             size_t size, int prot)
>  {
> +     struct iommu_domain *domain = iommu_get_dma_domain(dev);
>       struct iommu_dma_cookie *cookie = domain->iova_cookie;
>       size_t iova_off = 0;
>       dma_addr_t iova;
> @@ -565,7 +567,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
>  static void __iommu_dma_free(struct device *dev, struct page **pages,
>               size_t size, dma_addr_t *handle)
>  {
> -     __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
> +     __iommu_dma_unmap(dev, *handle, size);
>       __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
>       *handle = DMA_MAPPING_ERROR;
>  }
> @@ -718,14 +720,13 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
>  static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
>               unsigned long offset, size_t size, int prot)
>  {
> -     return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
> -                     iommu_get_dma_domain(dev));
> +     return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
>  }
> 
>  static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
>               size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
> -     __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
> +     __iommu_dma_unmap(dev, handle, size);
>  }
> 
>  static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
> @@ -734,11 +735,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
>  {
>       phys_addr_t phys = page_to_phys(page) + offset;
>       bool coherent = dev_is_dma_coherent(dev);
> +     int prot = dma_info_to_prot(dir, coherent, attrs);
>       dma_addr_t dma_handle;
> 
> -     dma_handle =__iommu_dma_map(dev, phys, size,
> -                     dma_info_to_prot(dir, coherent, attrs),
> -                     iommu_get_dma_domain(dev));
> +     dma_handle =__iommu_dma_map(dev, phys, size, prot);
>       if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
>           dma_handle != DMA_MAPPING_ERROR)
>               arch_sync_dma_for_device(dev, phys, size, dir);
> @@ -750,7 +750,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
>  {
>       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
>               iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
> -     __iommu_dma_unmap(iommu_get_dma_domain(dev), dma_handle, size);
> +     __iommu_dma_unmap(dev, dma_handle, size);
>  }
> 
>  /*
> @@ -931,21 +931,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>               sg = tmp;
>       }
>       end = sg_dma_address(sg) + sg_dma_len(sg);
> -     __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
> +     __iommu_dma_unmap(dev, start, end - start);
>  }
> 
>  static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
>               size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
>       return __iommu_dma_map(dev, phys, size,
> -                     dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
> -                     iommu_get_dma_domain(dev));
> +                     dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
>  }
> 
>  static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
>               size_t size, enum dma_data_direction dir, unsigned long attrs)
>  {
> -     __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
> +     __iommu_dma_unmap(dev, handle, size);
>  }
> 
>  static void *iommu_dma_alloc(struct device *dev, size_t size,
> @@ -1222,7 +1221,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
>       if (!msi_page)
>               return NULL;
> 
> -     iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
> +     iova = __iommu_dma_map(dev, msi_addr, size, prot);

I think the domain here was previously the one passed in by the caller,
which is retrieved with iommu_get_domain_for_dev() and may not be the
default domain that iommu_get_dma_domain() returns. When the device is
assigned to a guest it is attached to a different (VFIO) domain, so
mapping the MSI page in the default domain would explain the
F_TRANSLATION fault above.
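For illustration, here is a rough sketch of how I read the MSI path with
this patch applied (my own sketch, not verbatim dma-iommu.c; the function
name msi_path_sketch() and the prot value are made up, and it assumes the
code sits inside dma-iommu.c so that __iommu_dma_map() is visible):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static void msi_path_sketch(struct device *dev, phys_addr_t msi_addr,
			    size_t size)
{
	/*
	 * Caller side (iommu_dma_prepare_msi -> iommu_dma_get_msi_page):
	 * the domain the device is attached to; for a device assigned
	 * to a guest this is the VFIO domain, not the default DMA domain.
	 */
	struct iommu_domain *attached = iommu_get_domain_for_dev(dev);
	dma_addr_t iova;

	/* With this patch the mapping no longer takes 'attached'... */
	iova = __iommu_dma_map(dev, msi_addr, size, IOMMU_WRITE | IOMMU_MMIO);

	/*
	 * ...because __iommu_dma_map() now looks up iommu_get_dma_domain()
	 * internally. If 'attached' is not the default domain, the MSI
	 * doorbell IOVA only exists in the default domain, and the device's
	 * MSI writes fault in the domain it is actually attached to.
	 */
	if (iova != DMA_MAPPING_ERROR && attached != iommu_get_dma_domain(dev))
		dev_warn(dev, "MSI page mapped in default domain only\n");
}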

Please check and let me know.

Thanks,
Shameer

>       if (iova == DMA_MAPPING_ERROR)
>               goto out_free_page;
> --
> 2.20.1
> 
